| code (string, 82–53.2k chars) | code_codestyle (int64, 0–721) | style_context (string, 91–41.9k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
'''simple docstring'''
def method_1(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # lower bound of integration
    b = 1.0  # upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
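# Quick check (editor's sketch, not part of the original snippet): for f(x) = x * x
# on [0.0, 1.0] the exact integral is 1/3. Note the loose `while x < (b - h)` bound in
# make_points can drop the last interior point, so the error is O(h) at the boundary
# rather than the O(h^2) a textbook composite trapezoidal rule would give.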
| 365 |
'''simple docstring'''

import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey


class NearestNeighbour:
    '''simple docstring'''

    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        # Start from a white canvas of the destination size
        self.output = np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
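# Editor's note (not in the original): the mapping is plain index scaling - resizing a
# 400x300 source to 800x600 gives ratio_x = ratio_y = 0.5, so output pixel (599, 799)
# samples source pixel (int(0.5 * 599), int(0.5 * 799)) = (299, 399).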
| 365 | 1 |
"""simple docstring"""
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
@require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
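# Observation (editor's note, read off the expected tensors above): relative to the
# 1b-lyrics tokenizer, the 5b-lyrics tokenizer pads its metadata prefix with -1 entries
# and shifts the lyric token ids up by one (space 76 -> 77, newline 78 -> 79).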
| 707 |
"""simple docstring"""
def pancake_sort(arr):
    """Sort the list using the pancake sort algorithm."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the whole unsorted prefix
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
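# Worked trace (editor's sketch): pancake_sort([3, 1, 2])
#   cur=3: max at index 0; flip prefix [3] -> [3, 1, 2]; flip first 3 -> [2, 1, 3]
#   cur=2: max at index 0; flip prefix [2] -> [2, 1, 3]; flip first 2 -> [1, 2, 3]
# print(pancake_sort([3, 1, 2]))  # -> [1, 2, 3]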
| 258 | 0 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(self, speech_model: WhisperForConditionalGeneration, speech_processor: WhisperProcessor, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor):
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
F'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
        self.register_modules(
            speech_model=speech_model, speech_processor=speech_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, )
def UpperCAmelCase_ ( self , A_ = "auto" )-> int:
'''simple docstring'''
if slice_size == "auto":
UpperCamelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCamelCase__ )
def UpperCAmelCase_ ( self )-> str:
'''simple docstring'''
self.enable_attention_slicing(UpperCamelCase__ )
@torch.no_grad()
    def __call__(self, audio, sampling_rate=16_000, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
'''simple docstring'''
        input_features = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(input_features, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(UpperCamelCase__ )}.''' )
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}")
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}.")
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`.")
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device)
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
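# Usage sketch (editor's addition; the wiring is illustrative, not from the original):
# the pipeline components (Whisper speech model/processor, VAE, CLIP text encoder and
# tokenizer, UNet, scheduler) are passed to __init__, after which
# pipe(audio_array, sampling_rate=16_000).images[0] transcribes the audio with Whisper
# and feeds the transcript through the Stable Diffusion loop above.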
| 3 |
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class ScoreSdeVePipeline(DiffusionPipeline):
    """simple docstring"""

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, num_inference_steps: int = 2000, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs, ):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
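# Editor's note (context beyond the snippet): this is the predictor-corrector sampler
# from score-based SDE generative modeling - each outer step runs `correct_steps`
# Langevin corrections (step_correct) before one reverse-SDE prediction (step_pred),
# and the final image is taken from the noise-free mean (prev_sample_mean).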
| 654 | 0 |
def power(base, exponent):
    """Compute base ** exponent for a non-negative integer exponent, recursively."""
    return base * power(base, (exponent - 1)) if exponent else 1


if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"{base} to the power of {exponent} is {result}")
| 488 |
def different_signs(num1, num2):
    """Return True if the two integers have opposite signs (sign-bit XOR trick)."""
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
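# Editor's note: the trick works because XOR-ing two integers with different sign bits
# yields a negative result, e.g. different_signs(5, -3) -> True (5 ^ -3 == -8), while
# different_signs(5, 3) -> False (5 ^ 3 == 6). Bitwise ^ binds tighter than <.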
| 488 | 1 |
from argparse import ArgumentParser

from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )


def run_command_factory(args):
    nlp = pipeline(
        task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device, )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format, output_path=args.output, input_path=args.input, column=args.column if args.column else nlp.default_input_names, overwrite=args.overwrite, )
    return RunCommand(nlp, reader)


class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)")
        run_parser.add_argument(
            "--column", type=str, help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)", )
        run_parser.add_argument(
            "--format", type=str, default="infer", choices=PipelineDataFormat.SUPPORTED_FORMATS, help="Input format to read from", )
        run_parser.add_argument(
            "--device", type=int, default=-1, help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)", )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
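# Usage sketch (editor's addition; the flags are the ones registered above):
#   transformers-cli run --task sentiment-analysis --input data.csv --column text --format infer
# infers the csv format from the input extension and streams each row through the pipeline.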
| 305 |
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []
    def build(self, input_shape):
if self.n_clusters > 0:
A : Union[str, Any] =self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer='zeros' , trainable=SCREAMING_SNAKE_CASE__ , name='cluster_weight' )
A : Any =self.add_weight(
shape=(self.n_clusters,) , initializer='zeros' , trainable=SCREAMING_SNAKE_CASE__ , name='cluster_bias' )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
A : int =self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer='zeros' , trainable=SCREAMING_SNAKE_CASE__ , name=f'out_projs_._{i}' , )
self.out_projs.append(SCREAMING_SNAKE_CASE__ )
else:
self.out_projs.append(SCREAMING_SNAKE_CASE__ )
A : Dict =self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer='zeros' , trainable=SCREAMING_SNAKE_CASE__ , name=f'out_layers_._{i}_._weight' , )
A : List[str] =self.add_weight(
shape=(self.vocab_size,) , initializer='zeros' , trainable=SCREAMING_SNAKE_CASE__ , name=f'out_layers_._{i}_._bias' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
A , A : Dict =self.cutoff_ends[i], self.cutoff_ends[i + 1]
A : Union[str, Any] =self.d_embed // (self.div_val**i)
A : Any =self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer='zeros' , trainable=SCREAMING_SNAKE_CASE__ , name=f'out_projs_._{i}' )
self.out_projs.append(SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer='zeros' , trainable=SCREAMING_SNAKE_CASE__ , name=f'out_layers_._{i}_._weight' , )
A : List[Any] =self.add_weight(
shape=(r_idx - l_idx,) , initializer='zeros' , trainable=SCREAMING_SNAKE_CASE__ , name=f'out_layers_._{i}_._bias' , )
self.out_layers.append((weight, bias) )
        super().build(input_shape)
    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b
    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)
    def call(self, hidden, target, return_mean=True, training=False):
A : Tuple =0
if self.n_clusters == 0:
A : List[str] =self._logit(SCREAMING_SNAKE_CASE__ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
A : List[str] =tf.nn.sparse_softmax_cross_entropy_with_logits(labels=SCREAMING_SNAKE_CASE__ , logits=SCREAMING_SNAKE_CASE__ )
A : str =tf.nn.log_softmax(SCREAMING_SNAKE_CASE__ , axis=-1 )
else:
A : int =shape_list(SCREAMING_SNAKE_CASE__ )
A : str =[]
A : int =tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
A , A : List[str] =self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
A : Dict =(target >= l_idx) & (target < r_idx)
A : Any =tf.where(SCREAMING_SNAKE_CASE__ )
A : List[Any] =tf.boolean_mask(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) - l_idx
if self.div_val == 1:
A : Union[str, Any] =self.out_layers[0][0][l_idx:r_idx]
A : str =self.out_layers[0][1][l_idx:r_idx]
else:
A : Union[str, Any] =self.out_layers[i][0]
A : List[Any] =self.out_layers[i][1]
if i == 0:
A : List[str] =tf.concat([cur_W, self.cluster_weight] , 0 )
A : Tuple =tf.concat([cur_b, self.cluster_bias] , 0 )
A : Optional[int] =self._logit(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.out_projs[0] )
A : str =tf.nn.log_softmax(SCREAMING_SNAKE_CASE__ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
A : List[Any] =tf.boolean_mask(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : int =self._gather_logprob(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
A : str =self._logit(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.out_projs[i] )
A : Optional[int] =tf.nn.log_softmax(SCREAMING_SNAKE_CASE__ )
A : int =self.cutoffs[0] + i - 1 # No probability for the head cluster
A : List[str] =head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(SCREAMING_SNAKE_CASE__ )
if target is not None:
A : Dict =tf.boolean_mask(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : Optional[int] =tf.boolean_mask(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : str =self._gather_logprob(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(SCREAMING_SNAKE_CASE__ , -cur_logprob , shape_list(SCREAMING_SNAKE_CASE__ ) )
A : Optional[Any] =tf.concat(SCREAMING_SNAKE_CASE__ , axis=-1 )
if target is not None:
if return_mean:
A : Tuple =tf.reduce_mean(SCREAMING_SNAKE_CASE__ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(SCREAMING_SNAKE_CASE__ )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.
self.add_metric(SCREAMING_SNAKE_CASE__ , name=self.name , aggregation='mean' if return_mean else '' )
return out
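# Editor's note (context, not in the original): this is the adaptive softmax of
# Grave et al. as used by Transformer-XL - frequent tokens live in a small "head"
# cluster scored at full width d_embed, while rarer tokens fall into tail clusters
# whose embedding width shrinks by div_val**i, making the output layer much cheaper.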
| 305 | 1 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
@torch.jit.ignore
def _get_minimal_slice_set(start: Sequence[int], end: Sequence[int], dims: Sequence[int], start_edges: Optional[Sequence[bool]] = None, end_edges: Optional[Sequence[bool]] = None,) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]
if start_edges is None:
__snake_case : List[str] = [s == 0 for s in start]
reduce_edge_list(_UpperCAmelCase )
if end_edges is None:
__snake_case : int = [e == (d - 1) for e, d in zip(_UpperCAmelCase ,_UpperCAmelCase )]
reduce_edge_list(_UpperCAmelCase )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(_UpperCAmelCase ) == 0:
return [()]
elif len(_UpperCAmelCase ) == 1:
return [(slice(start[0] ,end[0] + 1 ),)]
__snake_case : List[Tuple[slice, ...]] = []
__snake_case : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(_UpperCAmelCase ,_UpperCAmelCase ):
if s == e:
path_list.append(slice(_UpperCAmelCase ,s + 1 ) )
else:
break
__snake_case : Tuple[slice, ...] = tuple(_UpperCAmelCase )
__snake_case : Tuple = len(_UpperCAmelCase )
# start == end, and we're done
if divergence_idx == len(_UpperCAmelCase ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
__snake_case : str = start[divergence_idx]
return tuple(
path + (slice(_UpperCAmelCase ,sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] ,[d - 1 for d in dims[divergence_idx + 1 :]] ,dims[divergence_idx + 1 :] ,start_edges=start_edges[divergence_idx + 1 :] ,end_edges=[True for _ in end_edges[divergence_idx + 1 :]] ,) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
__snake_case : Tuple = end[divergence_idx]
return tuple(
path + (slice(_UpperCAmelCase ,edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] ,end[divergence_idx + 1 :] ,dims[divergence_idx + 1 :] ,start_edges=[True for _ in start_edges[divergence_idx + 1 :]] ,end_edges=end_edges[divergence_idx + 1 :] ,) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] ,end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] ,end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 ,end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
__snake_case : Any = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 ,end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx, end_idx, batch_dims, )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(layer: Callable, inputs: Dict[str, Any], chunk_size: int, no_batch_dims: int, low_mem: bool = False, _out: Any = None, _add_into_out: bool = False,) -> Any:
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice, flat_start=i, flat_end=min(flat_batch_dim, i + chunk_size), no_batch_dims=len(orig_batch_dims), )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
class ChunkSizeTuner:
    def __init__(self, max_chunk_size: int = 512, ):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None
    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]
    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent
    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int, ) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn, args, min_chunk_size, )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
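# Editor's note (context, not in the original): chunk_layer is the low-memory trick from
# AlphaFold/OpenFold - it flattens the leading batch dims of every input tensor, runs the
# layer on chunk_size-sized slices in a Python loop, and scatters results into a
# pre-allocated output, trading graph-level parallelism for a lower peak memory footprint.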
| 124 |
'''simple docstring'''
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(data)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
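# Round-trip check (editor's sketch): base64_encode(b"Hi") == b"SGk=" and
# base64_decode("SGk=") == b"Hi", matching the standard library's base64.b64encode.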
| 124 | 1 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token)


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
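# Usage sketch (editor's addition; the token source and artifact name are illustrative):
# reports = get_last_daily_ci_reports(["ci_results"], output_dir="ci", token=os.environ.get("GH_TOKEN"))
# downloads each artifact zip of the last completed daily CI run and returns
# {artifact_name: {filename: text}} for inspection.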
| 412 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size, ffn_dim=hidden_size * 4, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size)

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint',
default='small',
type=str,
help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
)
parser.add_argument(
'--pytorch_dump_folder',
required=True,
default=None,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
parser.add_argument(
'--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
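# Usage sketch (editor's addition; the script filename is hypothetical):
#   python convert_musicgen_transformers.py --checkpoint small --pytorch_dump_folder ./musicgen-small
# converts the fairseq weights, sanity-checks the LM logits shape, and saves model + processor.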
| 412 | 1 |
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
lowerCAmelCase__: Optional[int] = "\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n"
lowerCAmelCase__: Dict = "\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n"
lowerCAmelCase__: Optional[Any] = "\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for 'record': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'prediction_text': the predicted answer text\n - for 'multirc': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question-answer pair as specified by the dataset\n - 'prediction': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for 'record': list of question-answers dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'answers': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for 'record':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1': F1 score\n - for 'multirc':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1_m': Per-question macro-F1 score\n - 'f1_a': Average F1 score over all answers\n - for 'axb':\n 'matthews_correlation': Matthew Correlation\n - for 'cb':\n - 'accuracy': Accuracy\n - 'f1': F1 score\n - for all others:\n - 'accuracy': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'record')\n >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]\n >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')\n >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
return float((preds == labels).mean() )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="binary" ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = simple_accuracy(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Dict = float(fa_score(y_true=SCREAMING_SNAKE_CASE , y_pred=SCREAMING_SNAKE_CASE , average=SCREAMING_SNAKE_CASE ) )
return {
"accuracy": acc,
"f1": fa,
}
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple:
SCREAMING_SNAKE_CASE_ : Any = {}
for id_pred, label in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ : Optional[int] = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
SCREAMING_SNAKE_CASE_ : List[Any] = id_pred['prediction']
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
SCREAMING_SNAKE_CASE_ : List[Any] = [(pred, label)]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = [], []
for question, preds_labels in question_map.items():
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = zip(*SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Dict = fa_score(y_true=SCREAMING_SNAKE_CASE , y_pred=SCREAMING_SNAKE_CASE , average='macro' )
fas.append(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[str] = int(sum(pred == label for pred, label in preds_labels ) == len(SCREAMING_SNAKE_CASE ) )
ems.append(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Any = float(sum(SCREAMING_SNAKE_CASE ) / len(SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE_ : List[Any] = sum(SCREAMING_SNAKE_CASE ) / len(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[Any] = float(fa_score(y_true=SCREAMING_SNAKE_CASE , y_pred=[id_pred['prediction'] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
def __A ( self ):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , )
def __A ( self ):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
def __A ( self , __lowerCAmelCase , __lowerCAmelCase ):
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(__lowerCAmelCase , __lowerCAmelCase )}
elif self.config_name == "cb":
return acc_and_fa(__lowerCAmelCase , __lowerCAmelCase , fa_avg='macro' )
elif self.config_name == "record":
SCREAMING_SNAKE_CASE_ : Any = [
{
'qas': [
{'id': ref['idx']['query'], 'answers': [{'text': ans} for ans in ref['answers']]}
for ref in references
]
}
]
SCREAMING_SNAKE_CASE_ : Tuple = {pred['idx']['query']: pred['prediction_text'] for pred in predictions}
return evaluate_record(__lowerCAmelCase , __lowerCAmelCase )[0]
elif self.config_name == "multirc":
return evaluate_multirc(__lowerCAmelCase , __lowerCAmelCase )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(__lowerCAmelCase , __lowerCAmelCase )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
| 311 |
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
lowerCAmelCase__: List[Any] = logging.get_logger(__name__)
class snake_case_ :
__lowerCamelCase : Any = None
@experimental
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return _map_with_joblib(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_proc if num_proc <= len(SCREAMING_SNAKE_CASE ) else len(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[str] = [] # We organize the splits ourselves (contiguous splits)
for index in range(SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ : List[str] = len(SCREAMING_SNAKE_CASE ) // num_proc
SCREAMING_SNAKE_CASE_ : Optional[Any] = len(SCREAMING_SNAKE_CASE ) % num_proc
SCREAMING_SNAKE_CASE_ : List[Any] = div * index + min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[Any] = start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
if len(SCREAMING_SNAKE_CASE ) != sum(len(i[1] ) for i in split_kwds ):
raise ValueError(
f'Error dividing inputs iterable among processes. '
f'Total number of objects {len(SCREAMING_SNAKE_CASE )}, '
f'length: {sum(len(i[1] ) for i in split_kwds )}' )
logger.info(
f'Spawning {num_proc} processes for {len(SCREAMING_SNAKE_CASE )} objects in slices of {[len(i[1] ) for i in split_kwds]}' )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = None, None
if not disable_tqdm:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = (RLock(),), tqdm.set_lock
with Pool(SCREAMING_SNAKE_CASE , initargs=SCREAMING_SNAKE_CASE , initializer=SCREAMING_SNAKE_CASE ) as pool:
SCREAMING_SNAKE_CASE_ : Optional[int] = pool.map(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
logger.info(f'Finished {num_proc} processes' )
SCREAMING_SNAKE_CASE_ : List[str] = [obj for proc_res in mapped for obj in proc_res]
logger.info(f'Unpacked {len(SCREAMING_SNAKE_CASE )} objects' )
return mapped
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
# progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
# and it requires monkey-patching joblib internal classes which is subject to change
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=SCREAMING_SNAKE_CASE ):
return joblib.Parallel()(
joblib.delayed(SCREAMING_SNAKE_CASE )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ) -> Tuple:
SCREAMING_SNAKE_CASE_ : str = backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
SCREAMING_SNAKE_CASE_ : Dict = None
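# Hedged restatement of the split arithmetic used in the multiprocessing path
# above: each process receives one contiguous slice, and the first
# `num_items % num_proc` slices are one element longer, so every item is
# covered exactly once.
def _example_contiguous_splits(num_items=10, num_proc=3):
    div, mod = num_items // num_proc, num_items % num_proc
    slices = []
    for index in range(num_proc):
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        slices.append((start, end))
    return slices  # [(0, 4), (4, 7), (7, 10)]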
| 311 | 1 |
def _lowercase( __a : str ):
a__ =len(UpperCamelCase__ )
a__ =len(matrix[0] )
a__ =min(UpperCamelCase__ , UpperCamelCase__ )
for row in range(UpperCamelCase__ ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , UpperCamelCase__ ):
a__ =matrix[col][row] / matrix[row][row]
for i in range(UpperCamelCase__ , UpperCamelCase__ ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
a__ =True
for i in range(row + 1 , UpperCamelCase__ ):
if matrix[i][row] != 0:
a__ , a__ =matrix[i], matrix[row]
a__ =False
break
if reduce:
rank -= 1
for i in range(UpperCamelCase__ ):
a__ =matrix[i][rank]
# Reduce the row pointer by one to stay on the same row
row -= 1
return rank
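# Hedged reference sketch: the routine above has its local names mangled in this
# dump, so here is the same Gaussian-elimination rank computation with the names
# spelled out and the "stay on this row" step made explicit with a while loop.
def _reference_rank_of_matrix(matrix):
    rows, columns = len(matrix), len(matrix[0])
    rank = min(rows, columns)
    row = 0
    while row < rank:
        if matrix[row][row] != 0:
            # Eliminate every entry below the pivot, then advance.
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
            row += 1
        else:
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    # Swap in a row with a usable pivot and retry this row.
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    break
            else:
                # No pivot anywhere in this column: drop the rank, pull the
                # last counted column into this position, and retry.
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
    return rank


# _reference_rank_of_matrix([[1.0, 2.0], [2.0, 4.0]]) == 1
# _reference_rank_of_matrix([[0.0, 1.0], [1.0, 0.0]]) == 2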
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=lowercase_ )
class lowercase ( lowercase_ ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
__SCREAMING_SNAKE_CASE : str = field(default='''text-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
__SCREAMING_SNAKE_CASE : ClassVar[Features] = Features({'''text''': Value('''string''' )} )
__SCREAMING_SNAKE_CASE : ClassVar[Features] = Features({'''labels''': ClassLabel} )
__SCREAMING_SNAKE_CASE : str = "text"
__SCREAMING_SNAKE_CASE : str = "labels"
def a ( self , snake_case ):
if self.label_column not in features:
raise ValueError(F'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , snake_case ):
raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
snake_case_ = copy.deepcopy(self )
snake_case_ = self.label_schema.copy()
snake_case_ = features[self.label_column]
snake_case_ = label_schema
return task_template
@property
def a ( self ):
return {
self.text_column: "text",
self.label_column: "labels",
}
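# Hedged usage sketch through the public `datasets` API that this (name-mangled)
# class mirrors: align_with_features copies the dataset's ClassLabel into the
# template's label schema. The label names below are made up, and the
# `datasets.tasks` module is assumed to be available (it is deprecated in
# recent `datasets` releases).
def _example_align_text_classification():
    from datasets import ClassLabel, Features, Value
    from datasets.tasks import TextClassification

    features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
    task = TextClassification(text_column="text", label_column="labels")
    return task.align_with_features(features).label_schema  # {'labels': ClassLabel(names=['neg', 'pos'])}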
| 362 | 0 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 710 |
import logging
import os
from .state import PartialState
class a ( logging.LoggerAdapter ):
@staticmethod
def _UpperCAmelCase ( A_ ):
'''simple docstring'''
_UpperCAmelCase : Tuple = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def _UpperCAmelCase ( self , A_ , A_ , *A_ , **A_ ):
'''simple docstring'''
if PartialState._shared_state == {}:
raise RuntimeError(
"You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." )
_UpperCAmelCase : Tuple = kwargs.pop("main_process_only" , A_ )
_UpperCAmelCase : int = kwargs.pop("in_order" , A_ )
if self.isEnabledFor(A_ ):
if self._should_log(A_ ):
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self.process(A_ , A_ )
self.logger.log(A_ , A_ , *A_ , **A_ )
elif in_order:
_UpperCAmelCase : Dict = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = self.process(A_ , A_ )
self.logger.log(A_ , A_ , *A_ , **A_ )
state.wait_for_everyone()
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: str , lowerCAmelCase: str = None ) -> List[Any]:
if log_level is None:
_UpperCAmelCase : List[str] = os.environ.get("ACCELERATE_LOG_LEVEL" , lowerCAmelCase )
_UpperCAmelCase : str = logging.getLogger(lowerCAmelCase )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(lowerCAmelCase , {} )
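# Hedged usage sketch, mirroring the documented `accelerate.logging.get_logger`
# API that the helper above implements; an Accelerator (or PartialState) must be
# created before the adapter logs anything.
def _example_multi_process_logging():
    from accelerate import Accelerator
    from accelerate.logging import get_logger  # public twin of the helper above

    accelerator = Accelerator()  # initializes the shared process state
    logger = get_logger(__name__, log_level="INFO")
    logger.info("emitted once, by the main process", main_process_only=True)
    logger.info("emitted by every process, in rank order", main_process_only=False, in_order=True)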
| 467 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
__UpperCAmelCase = None
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
__UpperCAmelCase = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
},
}
__UpperCAmelCase = {
'camembert-base': 512,
}
__UpperCAmelCase = '▁'
class __lowercase ( __lowerCamelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["""input_ids""", """attention_mask"""]
snake_case_ = CamembertTokenizer
def __init__( self : List[str] ,A : Optional[int]=None ,A : List[str]=None ,A : List[Any]="<s>" ,A : Optional[int]="</s>" ,A : Optional[Any]="</s>" ,A : str="<s>" ,A : Optional[Any]="<unk>" ,A : Tuple="<pad>" ,A : int="<mask>" ,A : Tuple=["<s>NOTUSED", "</s>NOTUSED"] ,**A : str ,):
'''simple docstring'''
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase__ : List[str] = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else mask_token
super().__init__(
A ,tokenizer_file=A ,bos_token=A ,eos_token=A ,sep_token=A ,cls_token=A ,unk_token=A ,pad_token=A ,mask_token=A ,additional_special_tokens=A ,**A ,)
UpperCAmelCase__ : List[str] = vocab_file
UpperCAmelCase__ : Union[str, Any] = False if not self.vocab_file else True
def __lowercase ( self : Tuple ,A : List[int] ,A : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase__ : int = [self.cls_token_id]
UpperCAmelCase__ : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowercase ( self : str ,A : List[int] ,A : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = [self.sep_token_id]
UpperCAmelCase__ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowercase ( self : str ,A : str ,A : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(A ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCAmelCase__ : Dict = os.path.join(
A ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ):
copyfile(self.vocab_file ,A )
return (out_vocab_file,)
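# Hedged sketch of the special-token layout the two methods above produce: a
# single sequence becomes `<s> A </s>` and a pair becomes `<s> A </s></s> B </s>`,
# with all-zero token type ids. The default cls/sep ids (5 and 6) are an
# assumption matching camembert-base.
def _example_camembert_layout(token_ids_a, token_ids_b=None, cls_id=5, sep_id=6):
    if token_ids_b is None:
        input_ids = [cls_id] + token_ids_a + [sep_id]
    else:
        input_ids = [cls_id] + token_ids_a + [sep_id, sep_id] + token_ids_b + [sep_id]
    token_type_ids = [0] * len(input_ids)
    return input_ids, token_type_ids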
| 65 |
from __future__ import annotations
import requests
def snake_case_ (__A : str ) -> dict:
__lowerCAmelCase : Tuple = f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'''
return requests.get(__A ).json()
def snake_case_ (__A : int = 1_0 ) -> list[dict]:
__lowerCAmelCase : List[Any] = """https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"""
__lowerCAmelCase : Union[str, Any] = requests.get(__A ).json()[:max_stories]
return [get_hackernews_story(__A ) for story_id in story_ids]
def snake_case_ (__A : int = 1_0 ) -> str:
__lowerCAmelCase : Optional[Any] = hackernews_top_stories(__A )
return "\n".join("""* [{title}]({url})""".format(**__A ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 651 | 0 |
'''simple docstring'''
from manim import *
class a_ ( UpperCAmelCase__ ):
def lowercase__ ( self : Optional[Any] ):
__snake_case = Rectangle(height=0.5 , width=0.5 )
__snake_case = Rectangle(height=0.25 , width=0.25 )
__snake_case = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__snake_case = [mem.copy() for i in range(6 )]
__snake_case = [mem.copy() for i in range(6 )]
__snake_case = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
__snake_case = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
__snake_case = VGroup(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
__snake_case = Text('CPU' , font_size=2_4 )
__snake_case = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowerCAmelCase )
__snake_case = [mem.copy() for i in range(4 )]
__snake_case = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
__snake_case = Text('GPU' , font_size=2_4 )
__snake_case = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(__lowerCAmelCase )
__snake_case = [mem.copy() for i in range(6 )]
__snake_case = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
__snake_case = Text('Model' , font_size=2_4 )
__snake_case = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(__lowerCAmelCase )
__snake_case = []
__snake_case = []
__snake_case = []
for i, rect in enumerate(__lowerCAmelCase ):
rect.set_stroke(__lowerCAmelCase )
__snake_case = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowerCAmelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowerCAmelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=__lowerCAmelCase , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=__lowerCAmelCase , buff=0.0 )
self.add(__lowerCAmelCase )
model_cpu_arr.append(__lowerCAmelCase )
self.add(*__lowerCAmelCase , *__lowerCAmelCase , *__lowerCAmelCase )
__snake_case = [mem.copy() for i in range(6 )]
__snake_case = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
__snake_case = Text('Loaded Checkpoint' , font_size=2_4 )
__snake_case = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
checkpoint.move_to([3, 0.5, 0] )
self.add(__lowerCAmelCase )
__snake_case = []
__snake_case = []
for i, rect in enumerate(__lowerCAmelCase ):
__snake_case = fill.copy().set_fill(__lowerCAmelCase , opacity=0.7 )
target.move_to(__lowerCAmelCase )
ckpt_arr.append(__lowerCAmelCase )
__snake_case = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__lowerCAmelCase )
self.add(*__lowerCAmelCase , *__lowerCAmelCase )
__snake_case = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__snake_case = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__lowerCAmelCase , __lowerCAmelCase )
__snake_case = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=1_8 , )
blue_text.next_to(__lowerCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__lowerCAmelCase )
__snake_case = MarkupText(
F'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.' , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
__snake_case = [meta_mem.copy() for i in range(6 )]
__snake_case = [meta_mem.copy() for i in range(6 )]
__snake_case = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
__snake_case = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
__snake_case = VGroup(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
__snake_case = Text('Disk' , font_size=2_4 )
__snake_case = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(__lowerCAmelCase , run_time=3 ) , Write(__lowerCAmelCase , run_time=1 ) , Create(__lowerCAmelCase , run_time=1 ) )
__snake_case = []
for i, rect in enumerate(__lowerCAmelCase ):
__snake_case = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__lowerCAmelCase , run_time=1.5 ) )
self.play(*__lowerCAmelCase )
self.play(FadeOut(__lowerCAmelCase ) )
__snake_case = MarkupText(F'Then, the checkpoint is removed from memory\nthrough garbage collection.' , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowerCAmelCase , run_time=3 ) )
self.play(
FadeOut(__lowerCAmelCase , __lowerCAmelCase , *__lowerCAmelCase , *__lowerCAmelCase ) , )
self.wait()
| 427 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a_ :
def __init__( self : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any=1_3 , __lowerCAmelCase : Any=3_0 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Union[str, Any]=3 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Any=3_2 , __lowerCAmelCase : int=2 , __lowerCAmelCase : str=4 , __lowerCAmelCase : str=3_7 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Tuple=1_0 , __lowerCAmelCase : Optional[Any]=0.02 , __lowerCAmelCase : Any=3 , __lowerCAmelCase : int=None , ):
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = is_training
__snake_case = use_labels
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__snake_case = (image_size // patch_size) ** 2
__snake_case = num_patches + 1
def lowercase__ ( self : Union[str, Any] ):
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : Optional[int] ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , )
def lowercase__ ( self : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ):
__snake_case = TFViTModel(config=__lowerCAmelCase )
__snake_case = model(__lowerCAmelCase , training=__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image of a different size than the one specified in config.
__snake_case = self.image_size // 2
__snake_case = pixel_values[:, :, :image_size, :image_size]
__snake_case = model(__lowerCAmelCase , interpolate_pos_encoding=__lowerCAmelCase , training=__lowerCAmelCase )
__snake_case = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def lowercase__ ( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any ):
__snake_case = self.type_sequence_label_size
__snake_case = TFViTForImageClassification(__lowerCAmelCase )
__snake_case = model(__lowerCAmelCase , labels=__lowerCAmelCase , training=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image of a different size than the one specified in config.
__snake_case = self.image_size // 2
__snake_case = pixel_values[:, :, :image_size, :image_size]
__snake_case = model(__lowerCAmelCase , interpolate_pos_encoding=__lowerCAmelCase , training=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__snake_case = 1
__snake_case = TFViTForImageClassification(__lowerCAmelCase )
__snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__snake_case = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self : List[str] ):
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class a_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowercase_ : Any = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
lowercase_ : Optional[Any] = (
{'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
if is_tf_available()
else {}
)
lowercase_ : Optional[int] = False
lowercase_ : Optional[int] = False
lowercase_ : Optional[Any] = False
def lowercase__ ( self : int ):
__snake_case = TFViTModelTester(self )
__snake_case = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=3_7 )
def lowercase__ ( self : int ):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def lowercase__ ( self : Tuple ):
pass
@unittest.skip(reason='ViT does not use inputs_embeds' )
def lowercase__ ( self : List[Any] ):
pass
def lowercase__ ( self : str ):
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
__snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase , tf.keras.layers.Layer ) )
def lowercase__ ( self : Any ):
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(__lowerCAmelCase )
__snake_case = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def lowercase__ ( self : Dict ):
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def lowercase__ ( self : Tuple ):
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
@slow
def lowercase__ ( self : Dict ):
__snake_case = TFViTModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(__lowerCAmelCase )
def lowerCamelCase__ ( ):
__snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class a_ ( unittest.TestCase ):
@cached_property
def lowercase__ ( self : Union[str, Any] ):
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def lowercase__ ( self : Union[str, Any] ):
__snake_case = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' )
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(images=__lowerCAmelCase , return_tensors='tf' )
# forward pass
__snake_case = model(**__lowerCAmelCase )
# verify the logits
__snake_case = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
__snake_case = tf.constant([-0.2744, 0.8215, -0.0836] )
tf.debugging.assert_near(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 )
| 427 | 1 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
snake_case_ : Optional[int] = len(SCREAMING_SNAKE_CASE__ )
snake_case_ : List[str] = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
snake_case_ : List[str] = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
snake_case_ : Union[str, Any] = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
snake_case_ : List[Any] = subset[i - 1][j]
if arr[i - 1] <= j:
snake_case_ : Union[str, Any] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
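# Hedged restatement with readable names (the locals above are mangled in this
# dump): subset[i][j] is True when some subset of the first i values sums to j.
def _reference_is_sum_subset(arr, required_sum):
    n = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(n + 1)]
    for i in range(n + 1):
        subset[i][0] = True  # the empty subset always sums to 0
    for i in range(1, n + 1):
        for j in range(1, required_sum + 1):
            subset[i][j] = subset[i - 1][j]  # option 1: skip arr[i - 1]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i][j] or subset[i - 1][j - arr[i - 1]]  # option 2: take it
    return subset[n][required_sum]


# _reference_is_sum_subset([3, 34, 4, 12, 5, 2], 9) -> True (4 + 5)
# _reference_is_sum_subset([3, 34, 4, 12, 5, 2], 30) -> False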
if __name__ == "__main__":
import doctest
doctest.testmod()
| 480 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class __lowercase ( _UpperCAmelCase , unittest.TestCase):
"""simple docstring"""
_A : List[str] = CpmAntTokenizer
_A : str = False
def __UpperCamelCase (self ):
super().setUp()
snake_case_ : Optional[int] = [
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
snake_case_ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
@tooslow
def __UpperCamelCase (self ):
snake_case_ : Dict = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""" )
snake_case_ : Any = """今天天气真好!"""
snake_case_ : str = ["""今天""", """天气""", """真""", """好""", """!"""]
snake_case_ : Dict = tokenizer.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
snake_case_ : Optional[int] = """今天天气真好!"""
snake_case_ : Dict = [tokenizer.bos_token] + tokens
snake_case_ : int = [6, 98_02, 1_49_62, 20_82, 8_31, 2_44]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) , lowercase__ )
snake_case_ : int = tokenizer.decode(lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
| 480 | 1 |
'''simple docstring'''
import random
def UpperCamelCase_ ( snake_case_ : Tuple ) -> bool:
'''simple docstring'''
__lowerCAmelCase = num - 1
__lowerCAmelCase = 0
while s % 2 == 0:
__lowerCAmelCase = s // 2
t += 1
for _ in range(5 ):
__lowerCAmelCase = random.randrange(2 , num - 1 )
__lowerCAmelCase = pow(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if v != 1:
__lowerCAmelCase = 0
while v != (num - 1):
if i == t - 1:
return False
else:
__lowerCAmelCase = i + 1
__lowerCAmelCase = (v**2) % num
return True
def UpperCamelCase_ ( snake_case_ : Union[str, Any] ) -> bool:
'''simple docstring'''
if num < 2:
return False
__lowerCAmelCase = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
1_01,
1_03,
1_07,
1_09,
1_13,
1_27,
1_31,
1_37,
1_39,
1_49,
1_51,
1_57,
1_63,
1_67,
1_73,
1_79,
1_81,
1_91,
1_93,
1_97,
1_99,
2_11,
2_23,
2_27,
2_29,
2_33,
2_39,
2_41,
2_51,
2_57,
2_63,
2_69,
2_71,
2_77,
2_81,
2_83,
2_93,
3_07,
3_11,
3_13,
3_17,
3_31,
3_37,
3_47,
3_49,
3_53,
3_59,
3_67,
3_73,
3_79,
3_83,
3_89,
3_97,
4_01,
4_09,
4_19,
4_21,
4_31,
4_33,
4_39,
4_43,
4_49,
4_57,
4_61,
4_63,
4_67,
4_79,
4_87,
4_91,
4_99,
5_03,
5_09,
5_21,
5_23,
5_41,
5_47,
5_57,
5_63,
5_69,
5_71,
5_77,
5_87,
5_93,
5_99,
6_01,
6_07,
6_13,
6_17,
6_19,
6_31,
6_41,
6_43,
6_47,
6_53,
6_59,
6_61,
6_73,
6_77,
6_83,
6_91,
7_01,
7_09,
7_19,
7_27,
7_33,
7_39,
7_43,
7_51,
7_57,
7_61,
7_69,
7_73,
7_87,
7_97,
8_09,
8_11,
8_21,
8_23,
8_27,
8_29,
8_39,
8_53,
8_57,
8_59,
8_63,
8_77,
8_81,
8_83,
8_87,
9_07,
9_11,
9_19,
9_29,
9_37,
9_41,
9_47,
9_53,
9_67,
9_71,
9_77,
9_83,
9_91,
9_97,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(_lowerCAmelCase )
def UpperCamelCase_ ( snake_case_ : Dict = 10_24 ) -> int:
'''simple docstring'''
while True:
__lowerCAmelCase = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(_lowerCAmelCase ):
return num
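# Hedged, self-contained restatement of the Miller-Rabin test above (the three
# functions in this dump share one mangled name, so they cannot call each other
# as written): write num - 1 = 2**t * s with s odd, then try random witnesses.
def _reference_rabin_miller(num, rounds=5):
    if num < 4:
        return num in (2, 3)
    if num % 2 == 0:
        return False
    s, t = num - 1, 0
    while s % 2 == 0:
        s //= 2
        t += 1
    for _ in range(rounds):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v in (1, num - 1):
            continue
        for _ in range(t - 1):
            v = (v * v) % num
            if v == num - 1:
                break
        else:
            return False  # a witnesses that num is composite
    return True  # probably prime; false-positive rate at most 4 ** -rounds


# _reference_rabin_miller(2_147_483_647) -> True (the Mersenne prime 2**31 - 1)
# _reference_rabin_miller(561) -> False almost surely (a Carmichael number)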
if __name__ == "__main__":
_A : List[Any] = generate_large_prime()
print(('''Prime number:''', num))
print(('''is_prime_low_num:''', is_prime_low_num(num)))
| 716 | '''simple docstring'''
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class _lowercase :
'''simple docstring'''
_SCREAMING_SNAKE_CASE : torch.Tensor # [batch_size x 3]
_SCREAMING_SNAKE_CASE : torch.Tensor # [batch_size x 3]
_SCREAMING_SNAKE_CASE : torch.Tensor # [batch_size x 3]
_SCREAMING_SNAKE_CASE : torch.Tensor # [batch_size x 3]
_SCREAMING_SNAKE_CASE : int
_SCREAMING_SNAKE_CASE : int
_SCREAMING_SNAKE_CASE : float
_SCREAMING_SNAKE_CASE : float
_SCREAMING_SNAKE_CASE : Tuple[int]
def a ( self : str ) -> List[Any]:
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def a ( self : Optional[Any] ) -> Dict:
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def a ( self : Optional[Any] ) -> Optional[Any]:
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def a ( self : List[Any] ) -> torch.Tensor:
__lowerCAmelCase = torch.arange(self.height * self.width )
__lowerCAmelCase = torch.stack(
[
pixel_indices % self.width,
torch.div(SCREAMING_SNAKE_CASE__ , self.width , rounding_mode="""trunc""" ),
] , axis=1 , )
return coords
@property
def a ( self : Tuple ) -> int:
__lowerCAmelCase , *__lowerCAmelCase = self.shape
__lowerCAmelCase = int(np.prod(SCREAMING_SNAKE_CASE__ ) )
__lowerCAmelCase = self.get_image_coords()
__lowerCAmelCase = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
__lowerCAmelCase = self.get_camera_rays(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = rays.view(SCREAMING_SNAKE_CASE__ , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : torch.Tensor ) -> torch.Tensor:
__lowerCAmelCase , *__lowerCAmelCase , __lowerCAmelCase = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
__lowerCAmelCase = coords.view(SCREAMING_SNAKE_CASE__ , -1 , 2 )
__lowerCAmelCase = self.resolution()
__lowerCAmelCase = self.fov()
__lowerCAmelCase = (flat.float() / (res - 1)) * 2 - 1
__lowerCAmelCase = fracs * torch.tan(fov / 2 )
__lowerCAmelCase = fracs.view(SCREAMING_SNAKE_CASE__ , -1 , 2 )
__lowerCAmelCase = (
self.z.view(SCREAMING_SNAKE_CASE__ , 1 , 3 )
+ self.x.view(SCREAMING_SNAKE_CASE__ , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(SCREAMING_SNAKE_CASE__ , 1 , 3 ) * fracs[:, :, 1:]
)
__lowerCAmelCase = directions / directions.norm(dim=-1 , keepdim=SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = torch.stack(
[
torch.broadcast_to(self.origin.view(SCREAMING_SNAKE_CASE__ , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , 2 , 3 )
def a ( self : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> "DifferentiableProjectiveCamera":
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=SCREAMING_SNAKE_CASE__ , height=SCREAMING_SNAKE_CASE__ , x_fov=self.x_fov , y_fov=self.y_fov , )
def UpperCamelCase_ ( snake_case_ : int ) -> DifferentiableProjectiveCamera:
'''simple docstring'''
__lowerCAmelCase = []
__lowerCAmelCase = []
__lowerCAmelCase = []
__lowerCAmelCase = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
__lowerCAmelCase = np.array([np.sin(snake_case_ ), np.cos(snake_case_ ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
__lowerCAmelCase = -z * 4
__lowerCAmelCase = np.array([np.cos(snake_case_ ), -np.sin(snake_case_ ), 0.0] )
__lowerCAmelCase = np.cross(snake_case_ , snake_case_ )
origins.append(snake_case_ )
xs.append(snake_case_ )
ys.append(snake_case_ )
zs.append(snake_case_ )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(snake_case_ , axis=0 ) ).float() , x=torch.from_numpy(np.stack(snake_case_ , axis=0 ) ).float() , y=torch.from_numpy(np.stack(snake_case_ , axis=0 ) ).float() , z=torch.from_numpy(np.stack(snake_case_ , axis=0 ) ).float() , width=snake_case_ , height=snake_case_ , x_fov=0.7 , y_fov=0.7 , shape=(1, len(snake_case_ )) , )
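# Hedged restatement of the per-pixel math inside the ray construction above
# (method names collapse to `a` in this dump): a pixel index is mapped to
# [-1, 1] per axis and scaled by tan(fov / 2); the ray direction is then
# normalize(z + fx * x_basis + fy * y_basis).
def _example_pixel_fractions(x, y, width, height, x_fov=0.7, y_fov=0.7):
    import math

    fx = (x / (width - 1)) * 2 - 1
    fy = (y / (height - 1)) * 2 - 1
    return fx * math.tan(x_fov / 2), fy * math.tan(y_fov / 2)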
| 330 | 0 |
'''simple docstring'''
import sys
from collections import defaultdict
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : List[str] ):
"""simple docstring"""
_lowercase = []
def snake_case ( self : Optional[Any] , __A : List[str] ):
"""simple docstring"""
return self.node_position[vertex]
def snake_case ( self : Any , __A : Dict , __A : List[str] ):
"""simple docstring"""
_lowercase = pos
def snake_case ( self : Optional[int] , __A : Any , __A : List[Any] , __A : Union[str, Any] , __A : Any ):
"""simple docstring"""
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
_lowercase = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
_lowercase = 2 * start + 1
else:
_lowercase = 2 * start + 2
if heap[smallest_child] < heap[start]:
_lowercase , _lowercase = heap[smallest_child], positions[smallest_child]
_lowercase , _lowercase = (
heap[start],
positions[start],
)
_lowercase , _lowercase = temp, tempa
_lowercase = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , __A )
self.top_to_bottom(__A , __A , __A , __A )
def snake_case ( self : Dict , __A : Tuple , __A : Union[str, Any] , __A : Union[str, Any] , __A : Union[str, Any] ):
"""simple docstring"""
_lowercase = position[index]
while index != 0:
_lowercase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
_lowercase = heap[parent]
_lowercase = position[parent]
self.set_position(position[parent] , __A )
else:
_lowercase = val
_lowercase = temp
self.set_position(__A , __A )
break
_lowercase = parent
else:
_lowercase = val
_lowercase = temp
self.set_position(__A , 0 )
def snake_case ( self : int , __A : List[str] , __A : List[str] ):
"""simple docstring"""
_lowercase = len(__A ) // 2 - 1
for i in range(__A , -1 , -1 ):
self.top_to_bottom(__A , __A , len(__A ) , __A )
def snake_case ( self : int , __A : Optional[int] , __A : str ):
"""simple docstring"""
_lowercase = positions[0]
_lowercase = sys.maxsize
self.top_to_bottom(__A , 0 , len(__A ) , __A )
return temp
def A__ ( A_ ) -> int:
_lowercase = Heap()
_lowercase = [0] * len(A_ )
_lowercase = [-1] * len(A_ ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
_lowercase = [] # Heap of Distance of vertices from their neighboring vertex
_lowercase = []
for vertex in range(len(A_ ) ):
distance_tv.append(sys.maxsize )
positions.append(A_ )
heap.node_position.append(A_ )
_lowercase = []
_lowercase = 1
_lowercase = sys.maxsize
for neighbor, distance in adjacency_list[0]:
_lowercase = 0
_lowercase = distance
heap.heapify(A_ , A_ )
for _ in range(1 , len(A_ ) ):
_lowercase = heap.delete_minimum(A_ , A_ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
_lowercase = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(A_ )]
):
_lowercase = distance
heap.bottom_to_top(
A_ , heap.get_position(A_ ) , A_ , A_ )
_lowercase = vertex
return tree_edges
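# Hedged sketch: the same routine on a fixed 4-vertex graph instead of stdin.
# The edge list is a toy example; `prisms_algorithm` is the call name used in
# the __main__ block below (the def itself is name-mangled in this dump).
def _example_prims_on_fixed_graph():
    graph = defaultdict(list)
    for u, v, w in [(0, 1, 1), (0, 2, 4), (1, 2, 2), (1, 3, 6), (2, 3, 3)]:
        graph[u].append([v, w])
        graph[v].append([u, w])
    return prisms_algorithm(graph)  # MST edges as (parent, vertex) pairs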
if __name__ == "__main__": # pragma: no cover
# < --------- Prim's Algorithm --------- >
__magic_name__ : List[str] = int(input('''Enter number of edges: ''').strip())
__magic_name__ : List[Any] = defaultdict(list)
for _ in range(edges_number):
__magic_name__ : Union[str, Any] = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 497 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__magic_name__ : str = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-classification/requirements.txt''')
__magic_name__ : int = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
__magic_name__ : int = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def A__ ( A_ ) -> Any:
with open(A_ , "rb" ) as f:
_lowercase = Image.open(A_ )
return im.convert("RGB" )
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
UpperCAmelCase__ = field(
default=lowerCamelCase__ , metadata={
'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
} , )
UpperCAmelCase__ = field(
default=lowerCamelCase__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
UpperCAmelCase__ = field(default=lowerCamelCase__ , metadata={'help': 'A folder containing the training data.'} )
UpperCAmelCase__ = field(default=lowerCamelCase__ , metadata={'help': 'A folder containing the validation data.'} )
UpperCAmelCase__ = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
UpperCAmelCase__ = field(
default=lowerCamelCase__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
UpperCAmelCase__ = field(
default=lowerCamelCase__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def snake_case ( self : int ):
"""simple docstring"""
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
"You must specify either a dataset name from the hub or a train and/or validation directory." )
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
UpperCAmelCase__ = field(
default='google/vit-base-patch16-224-in21k' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
UpperCAmelCase__ = field(
default=lowerCamelCase__ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(lowerCamelCase__ )} , )
UpperCAmelCase__ = field(
default=lowerCamelCase__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
UpperCAmelCase__ = field(
default=lowerCamelCase__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
UpperCAmelCase__ = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
UpperCAmelCase__ = field(default=lowerCamelCase__ , metadata={'help': 'Name or path of preprocessor config.'} )
UpperCAmelCase__ = field(
default=lowerCamelCase__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
UpperCAmelCase__ = field(
default=lowerCamelCase__ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def A__ ( A_ ) -> Optional[Any]:
_lowercase = torch.stack([example["pixel_values"] for example in examples] )
_lowercase = torch.tensor([example["labels"] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def A__ ( ) -> Optional[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowercase , _lowercase , _lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowercase , _lowercase , _lowercase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_image_classification" , A_ , A_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_lowercase = training_args.get_process_log_level()
logger.setLevel(A_ )
transformers.utils.logging.set_verbosity(A_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
_lowercase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_lowercase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
_lowercase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="image-classification" , use_auth_token=True if model_args.use_auth_token else None , )
else:
_lowercase = {}
if data_args.train_dir is not None:
_lowercase = os.path.join(data_args.train_dir , "**" )
if data_args.validation_dir is not None:
_lowercase = os.path.join(data_args.validation_dir , "**" )
_lowercase = load_dataset(
"imagefolder" , data_files=A_ , cache_dir=model_args.cache_dir , task="image-classification" , )
# If we don't have a validation split, split off a percentage of train as validation.
_lowercase = None if "validation" in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , A_ ) and data_args.train_val_split > 0.0:
_lowercase = dataset["train"].train_test_split(data_args.train_val_split )
_lowercase = split["train"]
        dataset["validation"] = split["test"]

    # Prepare label mappings.
    # We'll include these in the model's config to get human-readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the evaluate package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary mapping strings to floats.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="image-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ]
    )

    def train_transforms(example_batch):
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch

    def val_transforms(example_batch):
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch

    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)

    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
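
# Illustration (not part of the original script, so the values below are
# arbitrary): the accuracy metric computed by compute_metrics reduces to a
# plain argmax comparison, sketched here without the `evaluate` package.
def _accuracy_sketch():
    logits = np.array([[0.1, 2.3], [1.5, 0.2], [0.0, 0.9]])  # (batch, num_labels)
    label_ids = np.array([1, 0, 0])
    return float((np.argmax(logits, axis=1) == label_ids).mean())  # 2 of 3 correct -> ~0.667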
| 497 | 1 |
'''simple docstring'''
from timeit import timeit
test_data = {
    "MALAYALAM": True,
    "String": False,
    "rotor": True,
    "level": True,
    "A": True,
    "BB": True,
    "ABC": False,
    "amanaplanacanalpanama": True,  # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())


def is_palindrome_traversal(s: str) -> bool:
    """Walk inward from both ends, comparing characters pairwise."""
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome(s: str) -> bool:
    """Compare each character with its mirror using a half-length traversal."""
    mid = len(s) // 2
    n = len(s)

    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(mid))


def is_palindrome_recursive(s: str) -> bool:
    """Strip matching end characters recursively; the empty and one-char strings are palindromes."""
    if len(s) <= 1:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    """Compare the string against its reverse slice."""
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500_000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")


if __name__ == "__main__":
    for key, value in test_data.items():
        assert is_palindrome(key) is is_palindrome_recursive(key)
        assert is_palindrome(key) is is_palindrome_slice(key)
        print(f"{key:21} {value}")
    print("a man a plan a canal panama")

    # finished 500,000 runs in 0.46793 seconds
    benchmark_function("is_palindrome_slice")
    # finished 500,000 runs in 0.85234 seconds
    benchmark_function("is_palindrome")
    # finished 500,000 runs in 1.32028 seconds
    benchmark_function("is_palindrome_recursive")
    # finished 500,000 runs in 2.08679 seconds
    benchmark_function("is_palindrome_traversal")
| 707 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=40, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates)
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
    expected_text = [
"""California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"""
""" reduce the risk of wildfires.""",
"""N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.""",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True)
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
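
# Added illustration (not part of the original test file): the padding-mask
# construction used by prepare_pegasus_inputs_dict, shown standalone.
# pad_token_id=1 matches the tester default above.
def _mask_sketch():
    input_ids = tf.constant([[5, 7, 2], [9, 1, 1]])
    attention_mask = tf.cast(tf.math.not_equal(input_ids, 1), tf.int8)
    return attention_mask.numpy()  # [[1 1 1], [1 0 0]] -- padding positions zeroed out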
| 548 | 0 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params(module) -> None:
    """Disable gradient tracking for every parameter of the module."""
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_pil(img) -> None:
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
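
# Added usage sketch (not part of the original helpers): freeze a tiny module
# and report where we would run. nn.Linear stands in for any torch module.
def _demo():
    from torch import nn

    model = nn.Linear(4, 2)
    freeze_params(model)
    assert all(not p.requires_grad for p in model.parameters())
    print(f"[{get_timestamp()}] running on {get_device()}")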
| 33 |
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process (shortest-job-first, non-preemptive)."""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []

    completed = 0
    total_time = 0
    # While processes are not completed:
    # a process whose arrival time has passed
    # and that still has remaining execution time is put into ready_process.
    # The shortest process in ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time is the sum of burst time and waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


if __name__ == "__main__":
    print("[TEST CASE 01]")

    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )

    # Printing the Result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
            f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
        )
    print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
    print(f"Average turnaround time = {mean(turn_around_time):.5f}")
| 33 | 1 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device

from ..test_pipelines_common import to_np


class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type="epsilon", variance_type="learned_range")

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type="epsilon", variance_type="learned_range")

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02)

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)
            pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)

    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)
            pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
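
# Added sketch (not part of the original mixin): the component dicts above are
# meant to be unpacked into a pipeline constructor. Using IFPipeline as the
# consumer is an assumption here; the mixin itself never names the pipeline class.
def _components_sketch():
    from diffusers import IFPipeline

    components = IFPipelineTesterMixin()._get_dummy_components()
    pipe = IFPipeline(**components)
    return type(pipe.unet).__name__  # "UNet2DConditionModel"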
| 718 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    r"""
    Constructs a Chinese-CLIP processor which wraps a Chinese-CLIP image processor and a BERT tokenizer into a
    single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
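
# Added usage sketch (not part of the original file). The checkpoint name is an
# assumption (the usual OFA-Sys Chinese-CLIP release); any local image works.
def _processor_sketch(image_path="cat.jpg"):
    from PIL import Image

    processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
    inputs = processor(text=["一只猫"], images=Image.open(image_path), return_tensors="pt")
    return sorted(inputs.keys())  # input_ids, attention_mask, pixel_values, ...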
| 392 | 0 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, input_dims: int = 128, targets_length: int = 256, max_decoder_noise_time: float = 2000.0, d_model: int = 768, num_layers: int = 12, num_heads: int = 12, d_kv: int = 64, d_ff: int = 2048, dropout_rate: float = 0.1):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False), nn.SiLU(), nn.Linear(d_model * 4, d_model * 4, bias=False), nn.SiLU()
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(decoder_noise_time * self.config.max_decoder_noise_time, embedding_dim=self.config.d_model, max_period=self.config.max_decoder_noise_time).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(torch.arange(seq_length, device=decoder_input_tokens.device), (batch, seq_length))

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype)

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(y, conditioning_emb=conditioning_emb, encoder_hidden_states=encoded, encoder_attention_mask=encoder_decoder_mask)[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out


class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate))

        # cross attention: layer 1
        self.layer.append(T5LayerCrossAttention(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon))

        # FiLM-conditioned MLP + dropout: last layer
        self.layer.append(T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon))

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None):
        hidden_states = self.layer[0](hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask)

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(encoder_hidden_states.dtype)

            hidden_states = self.layer[1](hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask)

        # Apply FiLM-conditioned feed-forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)


class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states


class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(normed_hidden_states, encoder_hidden_states=key_value_states, attention_mask=attention_mask.squeeze(1))
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output


class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states


class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states


class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """Construct a layernorm module in the T5 style: no bias and no subtraction of mean."""
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5-style layer norm only scales and does not shift; the variance is
        # accumulated in float32 for numerical stability.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states


class NewGELUActivation(nn.Module):
    """
    Tanh approximation of the GELU activation (identical to the one used in the Google BERT and OpenAI GPT repos).
    """

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))


class T5FiLMLayer(nn.Module):
    """
    FiLM layer: feature-wise linear modulation of x by a conditioning embedding.
    """

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
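
# Added shape-check sketch (not part of the original module). The tiny
# hyperparameters are arbitrary; real spectrogram decoders are far larger, and
# the mask layout accepted by Attention is assumed, not verified here.
def _decoder_shape_sketch():
    decoder = T5FilmDecoder(input_dims=8, targets_length=16, d_model=32, num_layers=1, num_heads=2, d_kv=16, d_ff=64)
    tokens = torch.randn(2, 16, 8)  # (batch, targets_length, input_dims)
    encodings = [(torch.randn(2, 5, 32), torch.ones(2, 5))]  # one encoder stream + its mask
    noise_time = torch.rand(2)  # noise level in [0, 1) per sample
    return decoder(encodings, tokens, noise_time).shape  # expected: torch.Size([2, 16, 8])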
| 58 |
'''simple docstring'''
def odd_even_sort(input_list: list) -> list:
    """Sort the list in place using odd-even transposition (brick) sort."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False

        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list


if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # inputting elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
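
    # Added self-check (not in the original script): fixed-input sanity asserts
    # for the sorter, including the empty-list edge case.
    assert odd_even_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
    assert odd_even_sort([]) == []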
| 683 | 0 |
'''simple docstring'''
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if it is safe to place a queen at board[row][column]."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Place queens row by row, backtracking; record and print every full placement."""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
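
# Added check (not part of the original script): the 8-queens puzzle is known
# to have exactly 92 solutions, so the count printed above should be 92.
assert len(solution) == 92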
| 718 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
    import torch
if is_vision_available():
    from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
] )
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
            ],
            outputs,
        )
    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass
    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        # This is highly irregular to have no small tests.
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
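
# Added usage sketch (not part of the original test file): the same pipeline
# exercised directly. Running this downloads the Intel/dpt-large checkpoint.
def _pipeline_sketch():
    depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
    result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    result["depth"].save("depth.png")  # PIL image of the predicted depth map
    return result["predicted_depth"].shape  # raw per-pixel depth tensor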
| 624 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 257 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/mbart-large-en-ro": 1_024,
"facebook/mbart-large-cc25": 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" MBART tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, **kwargs)

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting: no prefix and a [eos, src_lang_code] suffix."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting: no prefix and a [eos, tgt_lang_code] suffix."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
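
# Added usage sketch (not part of the original module); downloads the en-ro
# checkpoint listed in the URL maps above.
def _mbart_sketch():
    tokenizer = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
    batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
    # Source sequences end with [eos, src_lang_code], per set_src_lang_special_tokens.
    return tokenizer.convert_ids_to_tokens(batch["input_ids"][0])[-2:]  # ['</s>', 'en_XX']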
| 257 | 1 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
def __init__( self : Dict , snake_case_ : int , snake_case_ : Optional[Any]=False , snake_case_ : List[str]=True , snake_case_ : Tuple=False , snake_case_ : int="<s>" , snake_case_ : Tuple="</s>" , snake_case_ : Tuple="<unk>" , snake_case_ : str="<sep>" , snake_case_ : Dict="<pad>" , snake_case_ : int="<cls>" , snake_case_ : Union[str, Any]="<mask>" , snake_case_ : List[Any]=["<eop>", "<eod>"] , snake_case_ : Optional[Dict[str, Any]] = None , **snake_case_ : str , ):
# Mask token behave like a normal word, i.e. include the space before it
__snake_case = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
__snake_case = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=snake_case__ , remove_space=snake_case__ , keep_accents=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , additional_special_tokens=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
__snake_case = 3
__snake_case = do_lower_case
__snake_case = remove_space
__snake_case = keep_accents
__snake_case = vocab_file
__snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case__ )
@property
def lowerCAmelCase ( self : List[Any] ):
return len(self.sp_model )
def lowerCAmelCase ( self : Dict ):
__snake_case = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ):
__snake_case = self.__dict__.copy()
__snake_case = None
return state
def __setstate__( self : Union[str, Any] , snake_case_ : Optional[int] ):
__snake_case = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__snake_case = {}
__snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase ( self : Optional[Any] , snake_case_ : List[Any] ):
if self.remove_space:
__snake_case = " ".join(inputs.strip().split() )
else:
__snake_case = inputs
__snake_case = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
__snake_case = unicodedata.normalize("NFKD" , snake_case__ )
__snake_case = "".join([c for c in outputs if not unicodedata.combining(snake_case__ )] )
if self.do_lower_case:
__snake_case = outputs.lower()
return outputs
def lowerCAmelCase ( self : Dict , snake_case_ : str ):
__snake_case = self.preprocess_text(snake_case__ )
__snake_case = self.sp_model.encode(snake_case__ , out_type=snake_case__ )
__snake_case = []
for piece in pieces:
if len(snake_case__ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
__snake_case = self.sp_model.EncodeAsPieces(piece[:-1].replace(snake_case__ , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__snake_case = cur_pieces[1:]
else:
__snake_case = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(snake_case__ )
else:
new_pieces.append(snake_case__ )
return new_pieces
def lowerCAmelCase ( self : Tuple , snake_case_ : int ):
return self.sp_model.PieceToId(snake_case__ )
def lowerCAmelCase ( self : Dict , snake_case_ : Optional[Any] ):
return self.sp_model.IdToPiece(snake_case__ )
def lowerCAmelCase ( self : Any , snake_case_ : int ):
__snake_case = "".join(snake_case__ ).replace(snake_case__ , " " ).strip()
return out_string
def lowerCAmelCase ( self : Tuple , snake_case_ : List[int] , snake_case_ : bool = False , snake_case_ : bool = None , snake_case_ : bool = True , **snake_case_ : Optional[int] , ):
__snake_case = kwargs.pop("use_source_tokenizer" , snake_case__ )
__snake_case = self.convert_ids_to_tokens(snake_case__ , skip_special_tokens=snake_case__ )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
__snake_case = []
__snake_case = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(snake_case__ ) )
__snake_case = []
sub_texts.append(snake_case__ )
else:
current_sub_text.append(snake_case__ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(snake_case__ ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
__snake_case = "".join(snake_case__ )
__snake_case = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
__snake_case = self.clean_up_tokenization(snake_case__ )
return clean_text
else:
return text
    def build_inputs_with_special_tokens( self : Tuple , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask( self : Union[str, Any] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1, 1]
        return ([0] * len(token_ids_0 )) + [1, 1]

    def create_token_type_ids_from_sequences( self : str , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
    def save_vocabulary( self : Any , save_directory : str , filename_prefix : Optional[str] = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )

        return (out_vocab_file,)
| 706 |
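A quick illustration of the special-token layout the helper methods above produce; the token ids below are made up for the example, real ids come from the tokenizer's vocabulary:

# XLNet-style layout: sequence tokens first, then [SEP] and [CLS] appended at the end.
sep, cls = [4], [3]                      # hypothetical sep/cls token ids
token_ids_0 = [17, 21, 35]
token_ids_1 = [42, 58]

single = token_ids_0 + sep + cls                     # [17, 21, 35, 4, 3]
pair = token_ids_0 + sep + token_ids_1 + sep + cls   # [17, 21, 35, 4, 42, 58, 4, 3]

# Token type ids as built by create_token_type_ids_from_sequences:
# segment 0, then segment 1, then 2 for the trailing [CLS].
type_ids = len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + [2]
assert type_ids == [0, 0, 0, 0, 1, 1, 1, 2]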
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput( BaseOutput ):
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler( SchedulerMixin , ConfigMixin ):
    order = 1
@register_to_config
    def __init__( self : List[Any] , num_train_timesteps : int = 2000 , snr : float = 0.15 , sigma_min : float = 0.01 , sigma_max : float = 1348.0 , sampling_eps : float = 1e-5 , correct_steps : int = 1 , ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps , sigma_min , sigma_max , sampling_eps )

    def scale_model_input( self : Union[str, Any] , sample : torch.FloatTensor , timestep : Optional[int] = None ):
        return sample

    def set_timesteps( self : Any , num_inference_steps : int , sampling_eps : float = None , device : Union[str, torch.device] = None ):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1 , sampling_eps , num_inference_steps , device=device )

    def set_sigmas( self : Dict , num_inference_steps : int , sigma_min : float = None , sigma_max : float = None , sampling_eps : float = None ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps , sampling_eps )

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min ) , math.log(sigma_max ) , num_inference_steps ) )
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )

    def get_adjacent_sigma( self : Optional[Any] , timesteps , t ):
        return torch.where(
            timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
    def step_pred( self : Dict , model_output : torch.FloatTensor , timestep : int , sample : torch.FloatTensor , generator : Optional[torch.Generator] = None , return_dict : bool = True , ):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )

        timestep = timestep * torch.ones(
            sample.shape[0] , device=sample.device )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps ) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device )

        sigma = self.discrete_sigmas[timesteps].to(sample.device )
        adjacent_sigma = self.get_adjacent_sigma(timesteps , timestep ).to(sample.device )
        drift = torch.zeros_like(sample )
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape ) < len(sample.shape ):
            diffusion = diffusion.unsqueeze(-1 )
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape , layout=sample.layout , generator=generator , device=sample.device , dtype=sample.dtype )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample , prev_sample_mean=prev_sample_mean )
    def step_correct( self : Union[str, Any] , model_output : torch.FloatTensor , sample : torch.FloatTensor , generator : Optional[torch.Generator] = None , return_dict : bool = True , ):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape , layout=sample.layout , generator=generator ).to(sample.device )

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0] ).to(sample.device )
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape ) < len(sample.shape ):
            step_size = step_size.unsqueeze(-1 )
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample )
    def add_noise( self : Dict , original_samples : torch.FloatTensor , noise : torch.FloatTensor , timesteps : torch.FloatTensor , ):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device )
        sigmas = self.discrete_sigmas.to(original_samples.device )[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples ) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples
def __len__( self : Tuple ):
return self.config.num_train_timesteps
| 614 | 0 |
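A small numeric sketch of the reverse-SDE update that step_pred above implements; the values here are toy stand-ins, not real model outputs:

import torch

sample = torch.randn(2, 3)          # x_t (toy shape)
score = torch.randn(2, 3)           # model_output ~ grad_x log p_t(x)
sigma, adjacent_sigma = 2.0, 1.5    # sigma_t and sigma_{t-1} (hypothetical values)

diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
drift = -(diffusion**2) * score                 # equation 6: drift of the reverse SDE (zero base drift)
noise = torch.randn_like(sample)
prev_sample_mean = sample - drift               # subtract because dt is a small negative timestep
prev_sample = prev_sample_mean + diffusion * noise
print(prev_sample.shape)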
"""simple docstring"""
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
UpperCamelCase__ = TypeVar('''KT''')
UpperCamelCase__ = TypeVar('''VT''')
class Node( Generic[KT, VT] ):
    def __init__( self : int ,key : KT | str = "root" ,value : VT | None = None) -> None:
        """simple docstring"""
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []
def __repr__( self : Any) -> str:
"""simple docstring"""
return F'Node({self.key}: {self.value})'
@property
    def level( self : Tuple) -> int:
"""simple docstring"""
return len(self.forward)
class SkipList( Generic[KT, VT] ):
    def __init__( self : List[Any] ,p : float = 0.5 ,max_level : int = 16) -> None:
        """simple docstring"""
        self.head = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level
def __str__( self : Union[str, Any]) -> str:
"""simple docstring"""
_lowerCAmelCase:Tuple = list(self)
if len(_a) == 0:
return F'SkipList(level={self.level})'
_lowerCAmelCase:Optional[int] = max((len(str(_a)) for item in items) ,default=4)
_lowerCAmelCase:List[str] = max(_a ,4) + 4
_lowerCAmelCase:str = self.head
_lowerCAmelCase:Optional[int] = []
_lowerCAmelCase:Tuple = node.forward.copy()
lines.append(F'[{node.key}]'.ljust(_a ,'''-''') + '''* ''' * len(_a))
lines.append(''' ''' * label_size + '''| ''' * len(_a))
while len(node.forward) != 0:
_lowerCAmelCase:Tuple = node.forward[0]
lines.append(
F'[{node.key}]'.ljust(_a ,'''-''')
+ ''' '''.join(str(n.key) if n.key == node.key else '''|''' for n in forwards))
lines.append(''' ''' * label_size + '''| ''' * len(_a))
_lowerCAmelCase:Any = node.forward
lines.append('''None'''.ljust(_a) + '''* ''' * len(_a))
return F'SkipList(level={self.level})\n' + "\n".join(_a)
def __iter__( self : Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
_lowerCAmelCase:List[str] = self.head
while len(node.forward) != 0:
yield node.forward[0].key
_lowerCAmelCase:Dict = node.forward[0]
    def random_level( self : int) -> int:
"""simple docstring"""
_lowerCAmelCase:Optional[Any] = 1
while random() < self.p and level < self.max_level:
level += 1
return level
    def _locate_node( self : Optional[int] ,key : Union[str, Any]) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
"""simple docstring"""
_lowerCAmelCase:Dict = []
_lowerCAmelCase:str = self.head
for i in reversed(range(self.level)):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
_lowerCAmelCase:List[Any] = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(_a)
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
    def delete( self : Optional[int] ,key : List[Any]) -> None:
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase:Union[str, Any] = self._locate_node(_a)
if node is not None:
for i, update_node in enumerate(_a):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
_lowerCAmelCase:Any = node.forward[i]
else:
_lowerCAmelCase:Optional[int] = update_node.forward[:i]
    def insert( self : Optional[int] ,key : Dict ,value : List[Any]) -> None:
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase:str = self._locate_node(_a)
if node is not None:
_lowerCAmelCase:Optional[int] = value
else:
_lowerCAmelCase:str = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 ,_a):
update_vector.append(self.head)
_lowerCAmelCase:Any = level
_lowerCAmelCase:List[Any] = Node(_a ,_a)
for i, update_node in enumerate(update_vector[:level]):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i])
if update_node.level < i + 1:
update_node.forward.append(_a)
else:
_lowerCAmelCase:Any = new_node
    def find( self : str ,key : Tuple) -> VT | None:
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase:Tuple = self._locate_node(_a)
if node is not None:
return node.value
return None
def UpperCAmelCase ( ):
_lowerCAmelCase:Any = SkipList()
skip_list.insert('''Key1''' , 3 )
skip_list.insert('''Key2''' , 12 )
skip_list.insert('''Key3''' , 41 )
skip_list.insert('''Key4''' , -19 )
_lowerCAmelCase:List[str] = skip_list.head
_lowerCAmelCase:Union[str, Any] = {}
while node.level != 0:
_lowerCAmelCase:Any = node.forward[0]
_lowerCAmelCase:List[str] = node.value
assert len(__snake_case ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def UpperCAmelCase ( ):
_lowerCAmelCase:Any = SkipList()
skip_list.insert('''Key1''' , 10 )
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''Key5''' , 7 )
skip_list.insert('''Key7''' , 10 )
skip_list.insert('''Key10''' , 5 )
skip_list.insert('''Key7''' , 7 )
skip_list.insert('''Key5''' , 5 )
skip_list.insert('''Key10''' , 10 )
_lowerCAmelCase:List[Any] = skip_list.head
_lowerCAmelCase:Optional[int] = {}
while node.level != 0:
_lowerCAmelCase:List[Any] = node.forward[0]
_lowerCAmelCase:List[Any] = node.value
if len(__snake_case ) != 4:
print()
assert len(__snake_case ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def UpperCAmelCase ( ):
_lowerCAmelCase:Dict = SkipList()
assert skip_list.find('''Some key''' ) is None
def UpperCAmelCase ( ):
_lowerCAmelCase:Dict = SkipList()
skip_list.insert('''Key2''' , 20 )
assert skip_list.find('''Key2''' ) == 20
skip_list.insert('''Some Key''' , 10 )
skip_list.insert('''Key2''' , 8 )
skip_list.insert('''V''' , 13 )
assert skip_list.find('''Y''' ) is None
assert skip_list.find('''Key2''' ) == 8
assert skip_list.find('''Some Key''' ) == 10
assert skip_list.find('''V''' ) == 13
def UpperCAmelCase ( ):
_lowerCAmelCase:Optional[int] = SkipList()
skip_list.delete('''Some key''' )
assert len(skip_list.head.forward ) == 0
def UpperCAmelCase ( ):
_lowerCAmelCase:Optional[int] = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 14 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''V''' )
skip_list.delete('''Key2''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''Key2''' ) is None
def UpperCAmelCase ( ):
_lowerCAmelCase:Tuple = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 14 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''V''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) == 14
assert skip_list.find('''Key1''' ) == 12
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''X''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) == 12
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''Key1''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) is None
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''Key2''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) is None
assert skip_list.find('''Key2''' ) is None
def UpperCAmelCase ( ):
_lowerCAmelCase:int = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 142 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''X''' )
def traverse_keys(snake_case : List[str] ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(__snake_case )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def UpperCAmelCase ( ):
def is_sorted(snake_case : Any ):
return all(next_item >= item for item, next_item in zip(__snake_case , lst[1:] ) )
_lowerCAmelCase:Optional[int] = SkipList()
for i in range(10 ):
skip_list.insert(__snake_case , __snake_case )
assert is_sorted(list(__snake_case ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(__snake_case ) )
skip_list.insert(-12 , -12 )
skip_list.insert(77 , 77 )
assert is_sorted(list(__snake_case ) )
def UpperCAmelCase ( ):
for _ in range(100 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def UpperCAmelCase ( ):
_lowerCAmelCase:int = SkipList()
skip_list.insert(2 , '''2''' )
skip_list.insert(4 , '''4''' )
skip_list.insert(6 , '''4''' )
skip_list.insert(4 , '''5''' )
skip_list.insert(8 , '''4''' )
skip_list.insert(9 , '''4''' )
skip_list.delete(4 )
print(__snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 227 |
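A short usage sketch of the skip list API exercised by the test functions above; it assumes the methods behave as those tests describe, with keys iterating back in sorted order:

sl = SkipList()
for key, value in [(3, "c"), (1, "a"), (2, "b")]:
    sl.insert(key, value)
assert list(sl) == [1, 2, 3]
assert sl.find(2) == "b"
sl.delete(2)
assert sl.find(2) is None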
from __future__ import annotations
import math
def minimax( depth : int , node_index : int , is_max : bool , scores : list[int] , height : float ) -> int:
    """simple docstring"""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0" )
    if len(scores ) == 0:
        raise ValueError("Scores cannot be empty" )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
    return min(
        minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )


def main() -> None:
    """simple docstring"""
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ) , 2 )
    print("Optimal value : " , end="" )
    print(minimax(0 , 0 , True , scores , height ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 693 | 0 |
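Working the example from main() by hand (height = log2(8) = 3): the depth-2 maxima are max(90,23)=90, max(6,33)=33, max(21,65)=65, max(123,34423)=34423; the depth-1 minima are min(90,33)=33 and min(65,34423)=65; the root maximizer picks max(33,65)=65. A quick check against the function above:

import math

scores = [90, 23, 6, 33, 21, 65, 123, 34423]
assert minimax(0, 0, True, scores, math.log(len(scores), 2)) == 65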
"""simple docstring"""
def bubble_sort( list_data : list , length : int = 0 ) -> list:
    '''simple docstring'''
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data , length - 1 )
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 714 |
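A quick sanity check of the recursive bubble sort above, including the trivial cases:

assert bubble_sort([5, 1, 4, 2, 8]) == [1, 2, 4, 5, 8]
assert bubble_sort([]) == []
assert bubble_sort([1]) == [1]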
"""simple docstring"""
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def convert_tax_checkpoint_to_flax( tax_checkpoint_path , config_name , flax_dump_folder_path ):
    '''simple docstring'''
    config = AutoConfig.from_pretrained(config_name )
    flax_model = FlaxAutoModelForSeqaSeqLM.from_config(config=config )
    tax_model = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
_lowerCamelCase : Optional[int] = "SelfAttention"
if config.model_type == "longt5" and config.encoder_attention_type == "local":
_lowerCamelCase : Optional[Any] = "LocalSelfAttention"
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCamelCase : Optional[int] = "TransientGlobalSelfAttention"
else:
raise ValueError(
"Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
" attribute with a value from ['local', 'transient-global]." )
# Encoder
for layer_index in range(config.num_layers ):
_lowerCamelCase : Tuple = F"""layers_{str(_lowerCamelCase )}"""
# Self-Attention
_lowerCamelCase : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
_lowerCamelCase : List[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
_lowerCamelCase : Tuple = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
_lowerCamelCase : int = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCamelCase : Optional[int] = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
_lowerCamelCase : Any = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
_lowerCamelCase : Any = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
_lowerCamelCase : str = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
_lowerCamelCase : List[Any] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
_lowerCamelCase : Optional[Any] = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
_lowerCamelCase : List[str] = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
_lowerCamelCase : Tuple = flax_model.params["encoder"]["block"][str(_lowerCamelCase )]["layer"]
_lowerCamelCase : int = tax_attention_key
_lowerCamelCase : Union[str, Any] = tax_attention_out
_lowerCamelCase : str = tax_attention_query
_lowerCamelCase : Dict = tax_attention_value
_lowerCamelCase : str = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCamelCase : Union[str, Any] = tax_global_layer_norm
if split_mlp_wi:
_lowerCamelCase : Optional[Any] = tax_mlp_wi_a
_lowerCamelCase : int = tax_mlp_wi_a
else:
_lowerCamelCase : str = tax_mlp_wi
_lowerCamelCase : Optional[int] = tax_mlp_wo
_lowerCamelCase : List[str] = tax_mlp_layer_norm
_lowerCamelCase : Tuple = flax_model_encoder_layer_block
# Only for layer 0:
_lowerCamelCase : Optional[int] = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
_lowerCamelCase : int = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCamelCase : int = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
_lowerCamelCase : List[str] = tax_encoder_global_rel_embedding
# Assigning
_lowerCamelCase : List[str] = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
_lowerCamelCase : int = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
_lowerCamelCase : str = F"""layers_{str(_lowerCamelCase )}"""
# Self-Attention
_lowerCamelCase : Optional[int] = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
_lowerCamelCase : Tuple = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
_lowerCamelCase : Dict = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
_lowerCamelCase : Any = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
_lowerCamelCase : Tuple = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
_lowerCamelCase : Optional[int] = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
_lowerCamelCase : List[str] = tax_enc_dec_attention_module["key"]["kernel"]
_lowerCamelCase : Tuple = tax_enc_dec_attention_module["out"]["kernel"]
_lowerCamelCase : Union[str, Any] = tax_enc_dec_attention_module["query"]["kernel"]
_lowerCamelCase : Any = tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
_lowerCamelCase : int = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
_lowerCamelCase : Optional[int] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
_lowerCamelCase : List[str] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
_lowerCamelCase : str = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
_lowerCamelCase : List[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
_lowerCamelCase : Tuple = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
_lowerCamelCase : str = flax_model.params["decoder"]["block"][str(_lowerCamelCase )]["layer"]
_lowerCamelCase : Tuple = tax_attention_key
_lowerCamelCase : List[str] = tax_attention_out
_lowerCamelCase : Union[str, Any] = tax_attention_query
_lowerCamelCase : Optional[int] = tax_attention_value
_lowerCamelCase : Optional[Any] = tax_pre_attention_layer_norm
_lowerCamelCase : Tuple = tax_enc_dec_attention_key
_lowerCamelCase : List[str] = tax_enc_dec_attention_out
_lowerCamelCase : Tuple = tax_enc_dec_attention_query
_lowerCamelCase : Tuple = tax_enc_dec_attention_value
_lowerCamelCase : Optional[Any] = tax_cross_layer_norm
if split_mlp_wi:
_lowerCamelCase : List[Any] = tax_mlp_wi_a
_lowerCamelCase : List[Any] = tax_mlp_wi_a
else:
_lowerCamelCase : Dict = tax_mlp_wi
_lowerCamelCase : Union[str, Any] = tax_mlp_wo
        _lowerCamelCase : Dict = tax_mlp_layer_norm
_lowerCamelCase : Optional[int] = flax_model_decoder_layer_block
# Decoder Normalization
_lowerCamelCase : Tuple = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
    _lowerCamelCase : Union[str, Any] = tax_decoder_norm
# Only for layer 0:
_lowerCamelCase : int = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
_lowerCamelCase : List[Any] = tax_decoder_rel_embedding
# Token Embeddings
_lowerCamelCase : Union[str, Any] = tax_model["target"]["token_embedder"]["embedding"]
    _lowerCamelCase : Any = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
_lowerCamelCase : Tuple = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
    flax_model.save_pretrained(flax_dump_folder_path )
print("T5X Model was sucessfully converted!" )
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
_lowerCAmelCase : int = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 386 | 0 |
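A hypothetical invocation of the converter above; the script name, checkpoint path, and dump folder are placeholders:

# python convert_t5x_checkpoint_to_flax.py \
#     --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#     --config_name google/long-t5-local-base \
#     --flax_dump_folder_path ./flax_dump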
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
SCREAMING_SNAKE_CASE : Any = {"""LayoutLMv2Config""", """LayoutLMv3Config"""}
@is_pipeline_test
class A_ ( unittest.TestCase ):
_SCREAMING_SNAKE_CASE = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_SCREAMING_SNAKE_CASE = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
_SCREAMING_SNAKE_CASE = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
_SCREAMING_SNAKE_CASE = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def _UpperCAmelCase ( self : str ):
__a = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" )
__a = text_classifier("This is great !" )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ) , [{"label": "LABEL_0", "score": 0.5_04}] )
__a = text_classifier("This is great !" , top_k=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , [{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}] )
__a = text_classifier(["This is great !", "This is bad"] , top_k=2 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , [
[{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}],
[{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}],
] , )
__a = text_classifier("This is great !" , top_k=1 )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ) , [{"label": "LABEL_0", "score": 0.5_04}] )
# Legacy behavior
__a = text_classifier("This is great !" , return_all_scores=__SCREAMING_SNAKE_CASE )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ) , [{"label": "LABEL_0", "score": 0.5_04}] )
__a = text_classifier("This is great !" , return_all_scores=__SCREAMING_SNAKE_CASE )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , [[{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}]] )
__a = text_classifier(["This is great !", "Something else"] , return_all_scores=__SCREAMING_SNAKE_CASE )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , [
[{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}],
[{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}],
] , )
__a = text_classifier(["This is great !", "Something else"] , return_all_scores=__SCREAMING_SNAKE_CASE )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , [
{"label": "LABEL_0", "score": 0.5_04},
{"label": "LABEL_0", "score": 0.5_04},
] , )
@require_torch
def _UpperCAmelCase ( self : List[str] ):
import torch
__a = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" , device=torch.device("cpu" ) , )
__a = text_classifier("This is great !" )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ) , [{"label": "LABEL_0", "score": 0.5_04}] )
@require_tf
def _UpperCAmelCase ( self : Any ):
__a = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="tf" )
__a = text_classifier("This is great !" )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ) , [{"label": "LABEL_0", "score": 0.5_04}] )
@slow
@require_torch
def _UpperCAmelCase ( self : Dict ):
__a = pipeline("text-classification" )
__a = text_classifier("This is great !" )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ) , [{"label": "POSITIVE", "score": 1.0}] )
__a = text_classifier("This is bad !" )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ) , [{"label": "NEGATIVE", "score": 1.0}] )
__a = text_classifier("Birds are a type of animal" )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ) , [{"label": "POSITIVE", "score": 0.9_88}] )
@slow
@require_tf
def _UpperCAmelCase ( self : Union[str, Any] ):
__a = pipeline("text-classification" , framework="tf" )
__a = text_classifier("This is great !" )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ) , [{"label": "POSITIVE", "score": 1.0}] )
__a = text_classifier("This is bad !" )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ) , [{"label": "NEGATIVE", "score": 1.0}] )
__a = text_classifier("Birds are a type of animal" )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ) , [{"label": "POSITIVE", "score": 0.9_88}] )
def _UpperCAmelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Dict ):
__a = TextClassificationPipeline(model=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE )
return text_classifier, ["HuggingFace is in", "This is another test"]
def _UpperCAmelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[str] ):
__a = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
__a = "HuggingFace is in"
__a = text_classifier(__SCREAMING_SNAKE_CASE )
self.assertEqual(nested_simplify(__SCREAMING_SNAKE_CASE ) , [{"label": ANY(__SCREAMING_SNAKE_CASE ), "score": ANY(__SCREAMING_SNAKE_CASE )}] )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
__a = ["HuggingFace is in ", "Paris is in France"]
__a = text_classifier(__SCREAMING_SNAKE_CASE )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , [{"label": ANY(__SCREAMING_SNAKE_CASE ), "score": ANY(__SCREAMING_SNAKE_CASE )}, {"label": ANY(__SCREAMING_SNAKE_CASE ), "score": ANY(__SCREAMING_SNAKE_CASE )}] , )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
self.assertTrue(outputs[1]["label"] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
__a = text_classifier(__SCREAMING_SNAKE_CASE , top_k=__SCREAMING_SNAKE_CASE )
__a = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , [[{"label": ANY(__SCREAMING_SNAKE_CASE ), "score": ANY(__SCREAMING_SNAKE_CASE )}] * N, [{"label": ANY(__SCREAMING_SNAKE_CASE ), "score": ANY(__SCREAMING_SNAKE_CASE )}] * N] , )
__a = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
__a = text_classifier(__SCREAMING_SNAKE_CASE )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , {"label": ANY(__SCREAMING_SNAKE_CASE ), "score": ANY(__SCREAMING_SNAKE_CASE )} , )
self.assertTrue(outputs["label"] in model.config.idalabel.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
__a = [["HuggingFace is in ", "Paris is in France"]]
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
text_classifier(__SCREAMING_SNAKE_CASE )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
__a = text_classifier([[["HuggingFace is in ", "Paris is in France"]]] )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , [{"label": ANY(__SCREAMING_SNAKE_CASE ), "score": ANY(__SCREAMING_SNAKE_CASE )}] , )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
| 197 | from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
"""feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
"""processing_mctct""": ["""MCTCTProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"""MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MCTCTForCTC""",
"""MCTCTModel""",
"""MCTCTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 197 | 1 |
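A minimal sketch of the lazy-import idea behind `_LazyModule`, written with a PEP 562 module-level `__getattr__`; this is a simplified stand-in, not the actual transformers implementation:

import importlib

_import_structure = {"configuration_mctct": ["MCTCTConfig"]}

def __getattr__(name):
    # Import the submodule that declares `name` only on first attribute access.
    for module_name, attrs in _import_structure.items():
        if name in attrs:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")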
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest( unittest.TestCase ):
    @slow
    def test_for_image_classification( self ) -> None:
        '''simple docstring'''
        image_processor = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
        model = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
        model.to(torch_device )

        from datasets import load_dataset

        dataset = load_dataset('nielsr/rvlcdip-demo' )

        image = dataset['train'][0]['image'].convert('RGB' )

        inputs = image_processor(image , return_tensors='pt' ).to(torch_device )

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
            logits = outputs.logits

        expected_shape = torch.Size((1, 16) )
        self.assertEqual(logits.shape , expected_shape )

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347] , device=torch_device , dtype=torch.float , )
        self.assertTrue(torch.allclose(logits[0, :3] , expected_slice , atol=1E-4 ) )
| 335 |
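A short follow-up sketch for turning the logits above into a document-class name, using the standard `id2label` mapping on the model config (RVL-CDIP has 16 classes):

predicted_class = logits.argmax(-1).item()
print(model.config.id2label[predicted_class])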
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree( tree : TreeNode | None ) -> bool:
    # Validation
    def is_valid_tree( node : TreeNode | None ) -> bool:
        if node is None:
            return True

        if not isinstance(node , TreeNode ):
            return False

        try:
            float(node.data )
        except (TypeError, ValueError):
            return False

        return is_valid_tree(node.left ) and is_valid_tree(node.right )

    if not is_valid_tree(tree ):
        raise ValueError(
            'Each node should be type of TreeNode and data should be float.' )

    def is_binary_search_tree_recursive_check(
        node : TreeNode | None , left_bound : float , right_bound : float ) -> bool:
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left , left_bound , node.data )
            and is_binary_search_tree_recursive_check(
                node.right , node.data , right_bound )
        )

    return is_binary_search_tree_recursive_check(tree , -float('inf' ) , float('inf' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 335 | 1 |
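A usage sketch for the checker above, with one valid and one invalid tree:

valid = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
invalid = TreeNode(2.0, TreeNode(3.0), TreeNode(1.0))
assert is_binary_search_tree(valid) is True
assert is_binary_search_tree(invalid) is False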
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
A : str = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def onnx_export( model , model_args : tuple , output_path : Path , ordered_input_names , output_names , dynamic_axes , opset , use_external_data_format=False , ):
    """simple docstring"""
    output_path.parent.mkdir(parents=True , exist_ok=True )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , use_external_data_format=use_external_data_format , enable_onnx_checker=True , opset_version=opset , )
    else:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , opset_version=opset , )
@torch.no_grad()
def convert_models( model_path : str , output_path : str , opset : int , fp16 : bool = False ):
    """simple docstring"""
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA" )
    else:
        device = "cpu"
    output_path = Path(output_path )

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae" )
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder , model_args=(
            torch.randn(1 , vae_latent_channels , 25 , 25 ).to(device=device , dtype=dtype ),
            False,
        ) , output_path=output_path / "vae_decoder" / "model.onnx" , ordered_input_names=["latent_sample", "return_dict"] , output_names=["sample"] , dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        } , opset=opset , )
    del vae_decoder
if __name__ == "__main__":
A : Any = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=1_4,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
A : Dict = parser.parse_args()
print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print('SD: Done: ONNX')
| 15 |
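A hypothetical follow-up: running the exporter and loading the resulting decoder with onnxruntime. The script name, paths, and the latent channel count of 4 are assumptions, and the input name matches the export above:

# python convert_vae_to_onnx.py --model_path ./stable-diffusion --output_path ./sd-onnx --opset 14
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession("./sd-onnx/vae_decoder/model.onnx")
latent = np.random.randn(1, 4, 25, 25).astype(np.float32)  # 4 latent channels is an assumption
outputs = session.run(None, {"latent_sample": latent})
print(outputs[0].shape)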
from __future__ import annotations
def longest_subsequence( array : list[int] ) -> list[int]:  # This function is recursive
    """simple docstring"""
    array_length = len(array )
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
        # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq : list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array )
            if len(temp_array ) > len(longest_subseq ):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array )]
    if len(temp_array ) > len(longest_subseq ):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 | 1 |
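A quick check of the subsequence finder above; it returns one longest non-decreasing subsequence:

assert longest_subsequence([1, 3, 2, 4]) == [1, 2, 4]
assert longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]) == [10, 22, 33, 41, 60, 80]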
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
_lowerCamelCase : Union[str, Any] = """bart"""
_lowerCamelCase : Dict = True
@st.cache(allow_output_mutation=True )
def __a ( ) -> List[str]:
if LOAD_DENSE_INDEX:
SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' )
SCREAMING_SNAKE_CASE : List[Any] = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' )
SCREAMING_SNAKE_CASE : Optional[Any] = qar_model.eval()
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = (None, None)
if MODEL_TYPE == "bart":
SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained('yjernite/bart_eli5' )
SCREAMING_SNAKE_CASE : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' )
SCREAMING_SNAKE_CASE : Tuple = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' )
sas_model.load_state_dict(save_dict['model'] )
SCREAMING_SNAKE_CASE : List[Any] = sas_model.eval()
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = make_qa_sas_model(
model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True )
def __a ( ) -> List[Any]:
if LOAD_DENSE_INDEX:
SCREAMING_SNAKE_CASE : int = faiss.StandardGpuResources()
SCREAMING_SNAKE_CASE : List[Any] = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0' )['train']
SCREAMING_SNAKE_CASE : Tuple = np.memmap(
'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wikiaab_passages.num_rows, 128) , )
SCREAMING_SNAKE_CASE : Any = faiss.IndexFlatIP(128 )
SCREAMING_SNAKE_CASE : Optional[int] = faiss.index_cpu_to_gpu(__UpperCAmelCase , 1 , __UpperCAmelCase )
wikiaab_gpu_index_flat.add(__UpperCAmelCase ) # TODO fix for larger GPU
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = (None, None)
SCREAMING_SNAKE_CASE : List[str] = Elasticsearch([{'host': 'localhost', 'port': '9200'}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True )
def __a ( ) -> int:
SCREAMING_SNAKE_CASE : int = datasets.load_dataset('eli5' , name='LFQA_reddit' )
SCREAMING_SNAKE_CASE : Dict = elia['train_eli5']
SCREAMING_SNAKE_CASE : Union[str, Any] = np.memmap(
'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(elia_train.num_rows, 128) )
SCREAMING_SNAKE_CASE : Any = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(__UpperCAmelCase )
return (elia_train, eli5_train_q_index)
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Tuple = load_indexes()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[int] = load_models()
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = load_train_data()
def __a ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any]=10 ) -> Optional[Any]:
SCREAMING_SNAKE_CASE : Union[str, Any] = embed_questions_for_retrieval([question] , __UpperCAmelCase , __UpperCAmelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = eli5_train_q_index.search(__UpperCAmelCase , __UpperCAmelCase )
SCREAMING_SNAKE_CASE : Any = [elia_train[int(__UpperCAmelCase )] for i in I[0]]
return nn_examples
def __a ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any]="wiki40b" , __lowerCAmelCase : Optional[int]="dense" , __lowerCAmelCase : str=10 ) -> Optional[int]:
if source == "none":
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = (' <P> '.join(['' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = query_qa_dense_index(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = query_es_index(
__UpperCAmelCase , __UpperCAmelCase , index_name='english_wiki40b_snippets_100w' , n_results=__UpperCAmelCase , )
SCREAMING_SNAKE_CASE : Any = [
(res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
]
SCREAMING_SNAKE_CASE : Tuple = 'question: {} context: {}'.format(__UpperCAmelCase , __UpperCAmelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda __lowerCAmelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __lowerCAmelCase : None),
} )
def __a ( __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any]=64 , __lowerCAmelCase : List[str]=256 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Tuple=0.95 , __lowerCAmelCase : Optional[int]=0.8 ) -> Any:
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = qa_sas_generate(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , num_answers=1 , num_beams=__UpperCAmelCase , min_len=__UpperCAmelCase , max_len=__UpperCAmelCase , do_sample=__UpperCAmelCase , temp=__UpperCAmelCase , top_p=__UpperCAmelCase , top_k=__UpperCAmelCase , max_input_length=1024 , device='cuda:0' , )[0]
return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
_lowerCamelCase : List[str] = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
_lowerCamelCase : str = """\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_lowerCamelCase : str = """\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"""
st.sidebar.markdown(description, unsafe_allow_html=True)
_lowerCamelCase : int = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
_lowerCamelCase : str = st.sidebar.checkbox("""Demo options""")
if demo_options:
_lowerCamelCase : int = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
_lowerCamelCase : Optional[Any] = action_list.index(action_st)
_lowerCamelCase : List[str] = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
_lowerCamelCase : str = show_type == """Show full text of passages"""
else:
_lowerCamelCase : Tuple = 3
_lowerCamelCase : List[str] = True
_lowerCamelCase : int = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
_lowerCamelCase : Any = """\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n """
st.sidebar.markdown(retriever_info)
_lowerCamelCase : List[Any] = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
_lowerCamelCase : Optional[int] = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
_lowerCamelCase : int = """wiki40b"""
_lowerCamelCase : List[Any] = """dense"""
_lowerCamelCase : int = """beam"""
_lowerCamelCase : List[Any] = 2
_lowerCamelCase : Optional[Any] = 64
_lowerCamelCase : Optional[int] = 256
_lowerCamelCase : List[Any] = None
_lowerCamelCase : int = None
_lowerCamelCase : int = st.sidebar.checkbox("""Generation options""")
if generate_options:
_lowerCamelCase : List[str] = """\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n """
st.sidebar.markdown(generate_info)
_lowerCamelCase : int = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
_lowerCamelCase : str = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
_lowerCamelCase : Tuple = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
_lowerCamelCase : Tuple = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_lowerCamelCase : Union[str, Any] = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_lowerCamelCase : List[str] = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_lowerCamelCase : List[Any] = None
# start main text
_lowerCamelCase : Optional[int] = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
_lowerCamelCase : int = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
_lowerCamelCase : int = st.text_input("""Enter your question here:""", """""")
else:
_lowerCamelCase : Dict = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
_lowerCamelCase , _lowerCamelCase : int = make_support(question, source=wiki_source, method="""dense""", n_results=10)
_lowerCamelCase , _lowerCamelCase : Dict = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
_lowerCamelCase : Any = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_lowerCamelCase : List[Any] = support_list[:10]
_lowerCamelCase : List[Any] = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
_lowerCamelCase , _lowerCamelCase : int = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
_lowerCamelCase , _lowerCamelCase : Dict = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
_lowerCamelCase : Optional[int] = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
_lowerCamelCase : int = res[1].strip()
if sec_titles == "":
_lowerCamelCase : Tuple = """[{}]({})""".format(res[0], wiki_url)
else:
_lowerCamelCase : Union[str, Any] = sec_titles.split(""" & """)
_lowerCamelCase : List[str] = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
_lowerCamelCase : Dict = find_nearest_training(question)
_lowerCamelCase : Union[str, Any] = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
_lowerCamelCase : Tuple = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
_lowerCamelCase : List[str] = """\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 713 |
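A minimal sketch of the dense-retrieval core the app above relies on: inner-product search over fixed-size embeddings with faiss. The corpus size and data here are toy values; the app uses 128-dimensional question and passage representations:

import faiss
import numpy as np

dim = 128
passages = np.random.rand(1000, dim).astype("float32")   # toy passage embeddings
index = faiss.IndexFlatIP(dim)
index.add(passages)

query = np.random.rand(1, dim).astype("float32")         # toy question embedding
scores, ids = index.search(query, 10)                    # top-10 passages by inner product
print(ids[0])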
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig:
    '''simple docstring'''

    def __init__(self, config_file_or_dict):
        '''simple docstring'''
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, 'r', encoding='utf-8') as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode('utf-8')
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f'''Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}''')
        self.config = config

        self.set_stage_and_offload()

    def set_stage_and_offload(self):
        '''simple docstring'''
        # zero stage - defaults to -1 (not a valid stage) when the key is absent
        self._stage = self.get_value('zero_optimization.stage', -1)
        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(['cpu', 'nvme'])
            offload_devices = set(
                [
                    self.get_value('zero_optimization.offload_optimizer.device'),
                    self.get_value('zero_optimization.offload_param.device'),
                ])
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True

    def find_config_node(self, ds_key_long):
        '''simple docstring'''
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split('.')
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key
        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        '''simple docstring'''
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        '''simple docstring'''
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split('.')
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f'''Can\'t find {ds_key_long} entry in the config: {self.config}''')
                else:
                    return
        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        '''simple docstring'''
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        '''simple docstring'''
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        '''simple docstring'''
        return self._stage == 2

    def is_zero3(self):
        '''simple docstring'''
        return self._stage == 3

    def is_offload(self):
        '''simple docstring'''
        return self._offload
class DeepSpeedEngineWrapper:
    '''simple docstring'''

    def __init__(self, engine):
        '''simple docstring'''
        self.engine = engine

    def backward(self, loss, **kwargs):
        '''simple docstring'''
        self.engine.backward(loss, **kwargs)
        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.


class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    '''simple docstring'''

    def __init__(self, optimizer):
        '''simple docstring'''
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, 'overflow')

    def zero_grad(self, set_to_none=None):
        '''simple docstring'''
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        '''simple docstring'''
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        '''simple docstring'''
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False


class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    '''simple docstring'''

    def __init__(self, scheduler, optimizers):
        '''simple docstring'''
        super().__init__(scheduler, optimizers)

    def step(self):
        '''simple docstring'''
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed


class DummyOptim:
    '''simple docstring'''

    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        '''simple docstring'''
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    '''simple docstring'''

    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        '''simple docstring'''
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs | 308 | 0 |
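Since the wrapper above lets DeepSpeed drive accumulation, clipping, and the optimizer step itself, a training loop only ever calls `backward`. A minimal usage sketch — `engine`, `dataloader`, and `loss_fn` here are hypothetical placeholders, not part of the code above:

# hypothetical loop; `engine` is assumed to be an initialized deepspeed.DeepSpeedEngine
wrapper = DeepSpeedEngineWrapper(engine)
for inputs, labels in dataloader:            # `dataloader` / `loss_fn` are placeholders
    loss = loss_fn(engine(inputs), labels)
    wrapper.backward(loss)                   # backward + engine.step() (clip, step, zero_grad) in one call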
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(f"""{solution() = }""")
| 17 |
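`_modexpt` is plain recursive modular exponentiation, so its results can be cross-checked against Python's built-in three-argument `pow` (names as fixed above):

# sanity check: _modexpt should agree with pow(base, exp, mod) whenever base < mod
for base, exp, mod in [(1777, 1855, 10**8), (7, 13, 1000)]:
    assert _modexpt(base, exp, mod) == pow(base, exp, mod)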
"""simple docstring"""
from __future__ import annotations
def is_9_pandigital(number: int) -> bool:
    '''simple docstring'''
    digits = str(number)
    return len(digits) == 9 and set(digits) == set('''123456789''')


def solution() -> int | None:
    '''simple docstring'''
    for base_num in range(9999, 4999, -1):
        candidate = 10_0002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 100_2003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(f'{solution() = }')
| 674 | 0 |
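The two multipliers encode digit concatenation as arithmetic: a 4-digit n followed by 2n is n * 100002, and a 3-digit n followed by 2n and 3n is n * 1002003. A quick check:

# 9267 concatenated with 18534 (= 2 * 9267) gives 926718534 == 9267 * 100002
assert int(str(9267) + str(2 * 9267)) == 9267 * 10_0002
# 192, 384, 576 concatenate to the pandigital 192384576 == 192 * 1002003
assert int(str(192) + str(2 * 192) + str(3 * 192)) == 192 * 100_2003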
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    """simple docstring"""

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """simple docstring"""

    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        '''simple docstring'''
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 704 |
def least_divisible_repunit(divisor: int) -> int:
    '''simple docstring'''
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1000000) -> int:
    '''simple docstring'''
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(f"""{solution() = }""")
| 626 | 0 |
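`least_divisible_repunit(d)` returns the length A(d) of the shortest repunit R(k) = 111...1 divisible by d. A quick check against the classic worked example A(7) = 6 (R(6) = 111111 = 7 x 15873):

assert least_divisible_repunit(7) == 6  # R(6) = 111111 is the first repunit divisible by 7
assert 111111 % 7 == 0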
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ):
    """simple docstring"""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}')

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    '''simple docstring'''

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(self, num_train_timesteps=1000, beta_start=0.00085, beta_end=0.012, beta_schedule="linear", trained_betas=None, prediction_type="epsilon", use_karras_sigmas=False, clip_sample=False, clip_sample_range=1.0, timestep_spacing="linspace", steps_offset=0, ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}')
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(self, sample, timestep, ):
        step_index = self.index_for_timestep(timestep)
        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None, ):
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.')
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])
        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])
        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)
        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)
        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]
        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1
        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t

    def _convert_to_karras(self, in_sigmas, num_inference_steps):
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()
        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order(self):
        return self.dt is None
    def step(self, model_output, timestep, sample, return_dict=True, ):
        step_index = self.index_for_timestep(timestep)
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`')
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range)
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2
            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample
            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples, noise, timesteps, ):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)
        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)
        noisy_samples = original_samples + noise * sigma
        return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
| 211 |
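`_convert_to_karras` above implements the rho-spaced noise schedule from Karras et al. (2022): sigmas are interpolated linearly in sigma**(1/rho) space and mapped back. The same spacing can be computed standalone; a small sketch with arbitrary example values for sigma_min/sigma_max:

import numpy as np

def karras_sigmas(sigma_min: float, sigma_max: float, num_steps: int, rho: float = 7.0) -> np.ndarray:
    # interpolate in sigma**(1/rho) space, then raise back to the rho-th power
    ramp = np.linspace(0, 1, num_steps)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho

print(karras_sigmas(0.03, 14.6, 5))  # example values only: high noise -> low noise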
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFMobileBertModel,
'''fill-mask''': TFMobileBertForMaskedLM,
'''question-answering''': TFMobileBertForQuestionAnswering,
'''text-classification''': TFMobileBertForSequenceClassification,
'''token-classification''': TFMobileBertForTokenClassification,
'''zero-shot''': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict
    class TFMobileBertModelTester(object):
        '''simple docstring'''

        def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, embedding_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size
        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])
            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)
            config = MobileBertConfig(
                vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size, )
            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        def create_and_check_mobilebert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            inputs = [input_ids, input_mask]
            result = model(inputs)
            result = model(input_ids)
            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict

    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 211 | 1 |
def solution(power: int = 1000) -> int:
    '''simple docstring'''
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip()))) | 57 |
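A quick check against the worked example from the problem statement (2**15 = 32768, whose digits sum to 26), using the fixed name above:

assert solution(15) == 26  # 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26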
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(self, max_length, vocab_size, d_model, dropout_rate, num_layers, num_heads, d_kv, d_ff, feed_forward_proj, is_decoder=False, ):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)
        t5config = T5Config(
            vocab_size=vocab_size, d_model=d_model, num_heads=num_heads, d_kv=d_kv, d_ff=d_ff, dropout_rate=dropout_rate, feed_forward_proj=feed_forward_proj, is_decoder=is_decoder, is_encoder_decoder=False, )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)
        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)
        x = self.dropout_pre(x)
        # invert the attention mask and broadcast it to attention-score shape
        input_shape = encoder_input_tokens.size()
        attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
        for lyr in self.encoders:
            x = lyr(x, attention_mask)[0]
        x = self.layer_norm(x)
        return self.dropout_post(x), encoder_inputs_mask | 57 | 1 |
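`get_extended_attention_mask` (from `ModuleUtilsMixin`) broadcasts a (batch, seq) padding mask to attention-score shape and turns masked positions into large negative biases. Roughly equivalent logic, as a standalone sketch:

import torch

def extended_attention_mask(mask: torch.Tensor, dtype=torch.float32) -> torch.Tensor:
    # (batch, seq) -> (batch, 1, 1, seq); 1 -> 0.0 bias (keep), 0 -> large negative bias (mask out)
    ext = mask[:, None, None, :].to(dtype)
    return (1.0 - ext) * torch.finfo(dtype).min

print(extended_attention_mask(torch.tensor([[1, 1, 0]])))  # last position gets a large negative bias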
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    def __init__(self, params, data):
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])
        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        max_len = self.params.max_model_input_size
        idxs = self.lengths > max_len
        logger.info(f"""Splitting {sum(idxs)} too long sequences.""")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
        else:
            cls_id, sep_id = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']
        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)
                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])
        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""")

    def remove_unknown_sequences(self):
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids['unk_token']
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""")

    def print_statistics(self):
        if not self.params.is_master:
            return
        logger.info(f"""{len(self)} sequences""")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)
        # Max for paddings
        max_seq_len_ = max(lengths)
        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids['pad_token']
        else:
            pad_idx = self.params.special_tok_ids['unk_token']
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)
        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
| 164 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
'''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class LukeConfig(PretrainedConfig):
    model_type = """luke"""

    def __init__(self, vocab_size=5_0267, entity_vocab_size=50_0000, hidden_size=768, entity_emb_size=256, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_entity_aware_attention=True, classifier_dropout=None, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 164 | 1 |
def capitalize_each_letter(txt: str) -> list[str]:
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__('doctest').testmod()
| 706 |
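A usage example for the function above (name as fixed here): each alphabetic position yields one variant with that single character uppercased.

print(capitalize_each_letter("st"))  # ['St', 'sT']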
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, 'src', 'diffusers')
class CheckDummiesTester(unittest.TestCase):
"""simple docstring"""
    def test_find_backend(self):
        '''simple docstring'''
        simple_backend = find_backend(''' if not is_torch_available():''')
        self.assertEqual(simple_backend, '''torch''')
        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend(''' if not (is_torch_available() and is_transformers_available()):''')
        self.assertEqual(double_backend, '''torch_and_transformers''')
        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            ''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''')
        self.assertEqual(triple_backend, '''torch_and_transformers_and_onnx''')
    def test_read_init(self):
        '''simple docstring'''
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('''torch''', objects)
        self.assertIn('''torch_and_transformers''', objects)
        self.assertIn('''flax_and_transformers''', objects)
        self.assertIn('''torch_and_transformers_and_onnx''', objects)
        # Likewise, we can't assert on the exact content of a key
        self.assertIn('''UNet2DModel''', objects['''torch'''])
        self.assertIn('''FlaxUNet2DConditionModel''', objects['''flax'''])
        self.assertIn('''StableDiffusionPipeline''', objects['''torch_and_transformers'''])
        self.assertIn('''FlaxStableDiffusionPipeline''', objects['''flax_and_transformers'''])
        self.assertIn('''LMSDiscreteScheduler''', objects['''torch_and_scipy'''])
        self.assertIn('''OnnxStableDiffusionPipeline''', objects['''torch_and_transformers_and_onnx'''])
    def test_create_dummy_object(self):
        '''simple docstring'''
        dummy_constant = create_dummy_object('''CONSTANT''', '''\'torch\'''')
        self.assertEqual(dummy_constant, '''\nCONSTANT = None\n''')
        dummy_function = create_dummy_object('''function''', '''\'torch\'''')
        self.assertEqual(
            dummy_function, '''\ndef function(*args, **kwargs):\n    requires_backends(function, \'torch\')\n''')
        expected_dummy_class = '''
class FakeClass(metaclass=DummyObject):
    _backends = \'torch\'

    def __init__(self, *args, **kwargs):
        requires_backends(self, \'torch\')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, \'torch\')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, \'torch\')
'''
        dummy_class = create_dummy_object('''FakeClass''', '''\'torch\'''')
        self.assertEqual(dummy_class, expected_dummy_class)
    def test_create_dummy_files(self):
        '''simple docstring'''
        expected_dummy_pytorch_file = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
'''
        dummy_files = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']})
        self.assertEqual(dummy_files['''torch'''], expected_dummy_pytorch_file) | 678 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'],
'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bert_fast'] = ['BertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_bert'] = [
'BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BertForMaskedLM',
'BertForMultipleChoice',
'BertForNextSentencePrediction',
'BertForPreTraining',
'BertForQuestionAnswering',
'BertForSequenceClassification',
'BertForTokenClassification',
'BertLayer',
'BertLMHeadModel',
'BertModel',
'BertPreTrainedModel',
'load_tf_weights_in_bert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_bert'] = [
'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBertEmbeddings',
'TFBertForMaskedLM',
'TFBertForMultipleChoice',
'TFBertForNextSentencePrediction',
'TFBertForPreTraining',
'TFBertForQuestionAnswering',
'TFBertForSequenceClassification',
'TFBertForTokenClassification',
'TFBertLMHeadModel',
'TFBertMainLayer',
'TFBertModel',
'TFBertPreTrainedModel',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bert_tf'] = ['TFBertTokenizer']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_bert'] = [
'FlaxBertForCausalLM',
'FlaxBertForMaskedLM',
'FlaxBertForMultipleChoice',
'FlaxBertForNextSentencePrediction',
'FlaxBertForPreTraining',
'FlaxBertForQuestionAnswering',
'FlaxBertForSequenceClassification',
'FlaxBertForTokenClassification',
'FlaxBertModel',
'FlaxBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 404 |
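The `_LazyModule` indirection at the bottom defers the heavy framework imports until an attribute is first accessed. PEP 562 module-level `__getattr__` gives the same effect in plain Python; a minimal sketch where the package, submodule, and symbol names are illustrative only:

# hypothetical mypkg/__init__.py
import importlib

_import_structure = {"heavy_module": ["HeavyClass"]}  # symbol -> submodule mapping, illustrative

def __getattr__(name):
    # resolve the submodule lazily on first attribute access
    for module, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(f".{module}", __name__), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")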
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''')
    datasets = load_dataset('''glue''', '''mrpc''')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='''longest''', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='''pt''', )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """simple docstring"""
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
        config['''num_epochs'''] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps)
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            '''Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`''')
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''])
    seed = int(config['''seed'''])
    batch_size = int(config['''batch_size'''])
    metric = evaluate.load('''glue''', '''mrpc''')
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch['''labels''']))
            metric.add_batch(
                predictions=predictions, references=references, )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'''epoch {epoch}:''', eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
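

# --- Illustrative aside (not part of the script above) ---
# A minimal, self-contained sketch of the manual gradient-accumulation pattern
# that `accelerator.accumulate(model)` replaces. The tiny linear model and the
# random data are hypothetical stand-ins; the step-skipping logic is the point.
import torch as _torch


def _manual_accumulation_demo(accumulation_steps=4, num_batches=8):
    model = _torch.nn.Linear(10, 2)
    optimizer = _torch.optim.SGD(model.parameters(), lr=0.1)
    for step in range(num_batches):
        x = _torch.randn(16, 10)
        y = _torch.randint(0, 2, (16,))
        # Scale the loss so the accumulated gradient equals the large-batch average.
        loss = _torch.nn.functional.cross_entropy(model(x), y) / accumulation_steps
        loss.backward()  # gradients add up in .grad across micro-batches
        if (step + 1) % accumulation_steps == 0:
            optimizer.step()  # one optimizer update per `accumulation_steps` batches
            optimizer.zero_grad()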
| 404 | 1 |
'''simple docstring'''
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
if __name__ == "__main__":
    def benchmark() -> None:
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
benchmark()
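
    # Quick agreement check between the two implementations (an added sketch;
    # the sample vectors are arbitrary). Both should return sqrt(27) ~= 5.196.
    assert np.isclose(
        euclidean_distance((1, 2, 3), (4, 5, 6)),
        euclidean_distance_no_np((1, 2, 3), (4, 5, 6)),
    )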
| 513 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
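

# A minimal usage sketch (added for illustration; the asserted values simply
# echo the defaults defined above):
if __name__ == "__main__":
    config = CanineConfig()
    assert config.num_hash_buckets == 16384 and config.downsampling_rate == 4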
| 513 | 1 |
"""simple docstring"""
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj),
                        initializer="zeros",
                        trainable=True,
                        name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)

                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)

                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)

            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
| 552 |
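
# --- Illustrative aside on the sample above (hypothetical Transfo-XL-style
# numbers): how a cutoff list partitions the vocabulary, mirroring the
# arithmetic in TFAdaptiveSoftmaxMask.__init__.
vocab_size, cutoffs = 267735, [20000, 40000, 200000]
cutoff_ends = [0] + cutoffs + [vocab_size]
buckets = list(zip(cutoff_ends[:-1], cutoff_ends[1:]))
# buckets -> [(0, 20000), (20000, 40000), (40000, 200000), (200000, 267735)]
n_clusters = len(cutoffs)            # one extra head logit per tail cluster
head_size = cutoffs[0] + n_clusters  # only 20003 logits computed per token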
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    """Copy/paste/tweak the old prophetnet checkpoint weights into the new structure."""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])

                elif attribute == "key_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
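
    # Example invocation (added for illustration; the script filename and both
    # paths below are hypothetical placeholders):
    #   python convert_prophetnet_checkpoint.py \
    #       --prophetnet_checkpoint_path /path/to/prophetnet_old_checkpoint \
    #       --pytorch_dump_folder_path /path/to/output_folder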
| 350 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
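

# A minimal usage sketch (added; assumes the standard `OnnxConfig(config, task=...)`
# constructor from transformers.onnx):
if __name__ == "__main__":
    onnx_config = Data2VecTextOnnxConfig(Data2VecTextConfig(), task="default")
    assert onnx_config.inputs["input_ids"] == {0: "batch", 1: "sequence"}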
| 174 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments:
    model_ckpt: Optional[str] = field(
        default='codeparrot/codeparrot', metadata={'help': 'Model name or path of model to be trained.'})
    save_dir: Optional[str] = field(
        default='./', metadata={'help': 'Save dir where model repo is cloned and models updates are saved to.'})
    dataset_name_train: Optional[str] = field(
        default='codeparrot/codeparrot-clean-train', metadata={'help': 'Name or path of training dataset.'})
    dataset_name_valid: Optional[str] = field(
        default='codeparrot/codeparrot-clean-valid', metadata={'help': 'Name or path of validation dataset.'})
    train_batch_size: Optional[int] = field(default=2, metadata={'help': 'Batch size for training.'})
    valid_batch_size: Optional[int] = field(default=2, metadata={'help': 'Batch size for evaluation.'})
    weight_decay: Optional[float] = field(default=0.1, metadata={'help': 'Value of weight decay.'})
    shuffle_buffer: Optional[int] = field(
        default=10000, metadata={'help': 'Size of buffer used to shuffle streaming dataset.'})
    learning_rate: Optional[float] = field(default=2E-4, metadata={'help': 'Learning rate for training.'})
    lr_scheduler_type: Optional[str] = field(default='cosine', metadata={'help': 'Learning rate scheduler type.'})
    num_warmup_steps: Optional[int] = field(
        default=750, metadata={'help': 'Number of warmup steps in the learning rate schedule.'})
    gradient_accumulation_steps: Optional[int] = field(
        default=16, metadata={'help': 'Number of gradient accumulation steps.'})
    gradient_checkpointing: Optional[bool] = field(
        default=True, metadata={'help': 'Use gradient checkpointing to reduce memory footprint.'})
    max_train_steps: Optional[int] = field(default=50000, metadata={'help': 'Maximum number of training steps.'})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'})
    seq_length: Optional[int] = field(default=1024, metadata={'help': 'Sequence lengths used for training.'})
    seed: Optional[int] = field(default=1, metadata={'help': 'Training seed.'})
    save_checkpoint_steps: Optional[int] = field(
        default=1024,
        metadata={'help': 'Interval to save checkpoints. Measured as number of forward passes not training steps.'}, )
    resume_from_checkpoint: Optional[str] = field(
        default=None, metadata={'help': 'States path if the training should continue from a checkpoint folder.'})
    tokenized: Optional[bool] = field(default=False, metadata={'help': 'If True the data is pretokenized.'})


@dataclass
class EvaluationArguments:
    model_ckpt: Optional[str] = field(
        default='codeparrot/codeparrot', metadata={'help': 'Model name or path of model to be evaluated.'})
    dataset_name: Optional[str] = field(
        default='codeparrot/codeparrot-clean-valid', metadata={'help': 'Name or path of validation dataset.'})
    batch_size: Optional[int] = field(default=2, metadata={'help': 'Batch size used for evaluation.'})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'})
    seq_length: Optional[int] = field(default=1024, metadata={'help': 'Length of sequences to be evaluated.'})
    seed: Optional[int] = field(default=1, metadata={'help': 'Random seed used for evaluation.'})


@dataclass
class HumanEvalArguments:
    model_ckpt: Optional[str] = field(
        default='codeparrot/codeparrot', metadata={'help': 'Model name or path of model to be evaluated.'})
    num_workers: Optional[int] = field(default=None, metadata={'help': 'Number of workers used for code evaluation.'})
    num_tasks: Optional[int] = field(
        default=None,
        metadata={'help': 'The number of human-eval tasks to run. If not included all tasks are evaluated.'}, )
    do_sample: Optional[bool] = field(
        default=True, metadata={'help': 'Sample from the language model\'s output distribution.'})
    temperature: Optional[float] = field(default=0.2, metadata={'help': 'Sampling temperature used for generation.'})
    max_new_tokens: Optional[int] = field(default=256, metadata={'help': 'Maximum number of newly generated tokens.'})
    top_k: Optional[int] = field(default=0, metadata={'help': 'Top-k parameter used for generation.'})
    top_p: Optional[float] = field(default=0.95, metadata={'help': 'Top-p parameter used for nucleus sampling.'})
    batch_size: Optional[int] = field(default=10, metadata={'help': 'Number of generations to run in parallel.'})
    n_samples: Optional[int] = field(
        default=200, metadata={'help': 'Number of completions to generate for each sample.'})
    seed: Optional[int] = field(default=1, metadata={'help': 'Random seed used for evaluation.'})
    output_file: Optional[str] = field(
        default='eval_results.json', metadata={'help': 'Output file to save the evaluation results.'})
    HF_ALLOW_CODE_EVAL: Optional[str] = field(
        default='0', metadata={'help': 'Allow `code_eval` to execute Python code on machine'})
    device_int: Optional[int] = field(
        default=-1,
        metadata={
            'help': (
                'Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'
                ' number corresponds to which GPU device id to run on.'
            )
        }, )


@dataclass
class PreprocessingArguments:
    num_workers: Optional[int] = field(
        default=None,
        metadata={
            'help': 'The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'
        }, )
    dataset_name: Optional[str] = field(
        default='transformersbook/codeparrot', metadata={'help': 'Folder or name of dataset to process.'})
    output_dir: Optional[str] = field(
        default='codeparrot-clean', metadata={'help': 'Folder to save processed dataset.'})
    samples_per_file: Optional[int] = field(
        default=100000, metadata={'help': 'Number of files to save per JSON output file.'})
    text_column: Optional[str] = field(default='content', metadata={'help': 'Column containing text data to process.'})
    line_max: Optional[float] = field(
        default=1000, metadata={'help': 'Maximum line length in file, otherwise file is filtered.'})
    line_mean: Optional[float] = field(
        default=100, metadata={'help': 'Maximum mean line length in file, otherwise file is filtered.'})
    alpha_frac: Optional[float] = field(
        default=0.25, metadata={'help': 'Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'})
    min_token_ratio: Optional[float] = field(
        default=1.5, metadata={'help': 'Minimum character token ratio for the file, otherwise file is filtered.'})
    filter_proba: Optional[float] = field(
        default=0.7, metadata={'help': 'Probability for filtering config, test and uncommon files.'})
    tokenizer: Optional[str] = field(
        default='codeparrot/codeparrot', metadata={'help': 'Name or path to the tokenizer.'}, )
    near_deduplication: Optional[bool] = field(
        default=False, metadata={'help': 'If True, near-duplicate samples are removed.'})
    jaccard_threshold: Optional[float] = field(
        default=0.85, metadata={'help': 'Jaccard threshold for near-duplicate samples.'})


@dataclass
class TokenizerTrainingArguments:
    base_tokenizer: Optional[str] = field(
        default='gpt2', metadata={'help': 'Base tokenizer to build new tokenizer from.'})
    dataset_name: Optional[str] = field(
        default='transformersbook/codeparrot-train', metadata={'help': 'Dataset to train tokenizer on.'})
    text_column: Optional[str] = field(default='content', metadata={'help': 'Column containing text data to process.'})
    n_examples: Optional[int] = field(default=200000, metadata={'help': 'Number of examples to train tokenizer on.'})
    vocab_size: Optional[int] = field(
        default=32768, metadata={'help': 'Vocabulary size of the new tokenizer.'})
    tokenizer_name: Optional[str] = field(default='codeparrot', metadata={'help': 'Name of new tokenizer.'})
    push_to_hub: Optional[bool] = field(default=True, metadata={'help': 'Push saved tokenizer to the hub.'})


@dataclass
class PretokenizationArguments:
    tokenizer_dir: Optional[str] = field(
        default='codeparrot/codeparrot', metadata={'help': 'Name or path to the tokenizer.'})
    dataset_name: Optional[str] = field(
        default='codeparrot/codeparrot-clean-train', metadata={'help': 'Name or path to the dataset to pretokenize.'})
    tokenized_data_repo: Optional[str] = field(
        default='tokenized-codeparrot-train', metadata={'help': 'Repo name of the pretokenized data.'})
    num_workers: Optional[int] = field(default=None, metadata={'help': 'Number of workers used for pretokenization.'})


@dataclass
class InitializationArguments:
    config_name: Optional[str] = field(
        default='gpt2-large', metadata={'help': 'Configuration to use for model initialization.'})
    tokenizer_name: Optional[str] = field(
        default='codeparrot/codeparrot', metadata={'help': 'Tokenizer attached to model.'})
    model_name: Optional[str] = field(default='codeparrot', metadata={'help': 'Name of the created model.'})
    push_to_hub: Optional[bool] = field(default=True, metadata={'help': 'Push saved model to the hub.'})
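

# --- Illustrative aside (added; not part of the original module) ---
# transformers' HfArgumentParser turns any of the dataclasses above into a CLI;
# `parse_args_into_dataclasses` is its standard entry point.
if __name__ == "__main__":
    from transformers import HfArgumentParser

    parser = HfArgumentParser(TrainingArguments)
    training_args = parser.parse_args_into_dataclasses()[0]
    print(training_args.model_ckpt)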
| 174 | 1 |
def prefix_function(input_string: str) -> list:
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str) -> int:
    return max(prefix_function(input_str))
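

# Worked example (values checked by hand): entry i of the prefix function is the
# length of the longest proper prefix of s[: i + 1] that is also its suffix --
# the core table behind Knuth-Morris-Pratt matching.
assert prefix_function("aabaaab") == [0, 1, 0, 1, 2, 2, 3]
assert longest_prefix("aabaaab") == 3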
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 67 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 67 | 1 |
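
# --- Illustrative aside on the tests above: the shortest-edge resize arithmetic
# used by get_expected_values, for a hypothetical 480x640 input with
# shortest_edge=18. The short side is scaled to 18; the long side keeps the
# aspect ratio, which is the same rule behind the (800, 1066) shapes asserted
# in the slow tests.
h, w, shortest_edge = 480, 640, 18
scale = shortest_edge / min(h, w)
assert (int(h * scale), int(w * scale)) == (18, 24)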
"""simple docstring"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
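

# --- Illustrative aside on the `-1` padding above (toy values, added for
# clarity): in LED's global_attention_mask, 1 marks global-attention tokens and
# 0 local-attention tokens, so padding needs a third value meaning "not attended".
_mask = [1, 0, 0]                        # global attention on the first token only
_padded = _mask + [-1] * (5 - len(_mask))
assert _padded == [1, 0, 0, -1, -1]      # right-side padding to length 5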
| 700 |
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task='fill-mask', model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task='fill-mask', model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = '\nfrom transformers import pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        env = self.get_env()
        env['TRANSFORMERS_OFFLINE'] = '1'
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            'You cannot infer task automatically within `pipeline` when using offline mode',
            result.stderr.decode().replace('\n', ''),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        load = '\nfrom transformers import AutoModel\n '
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
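
    # --- Illustrative aside (added): the socket monkeypatch the `mock` strings
    # above inject into their subprocesses, shown standalone. After calling
    # this, any attempt to open a network connection raises immediately.
    @staticmethod
    def _simulate_offline():
        import socket

        def offline_socket(*args, **kwargs):
            raise RuntimeError("Offline mode is enabled, we shouldn't access internet")

        socket.socket = offline_socket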
| 104 | 0 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
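

# A minimal shape check for the upsampler (added sketch; sizes are hypothetical,
# inputs are NHWC as everywhere in this file):
if __name__ == "__main__":
    x = jnp.zeros((1, 8, 8, 4))  # (batch, height, width, channels)
    upsample = FlaxUpsample2D(out_channels=4)
    params = upsample.init(jax.random.PRNGKey(0), x)
    y = upsample.apply(params, x)
    assert y.shape == (1, 16, 16, 4)  # nearest-neighbor resize doubles H and W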
| 464 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')


@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = '<pad>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-1], '<mask>')
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)

    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '9',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                'é',
                '.',
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '<unk>',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                '<unk>',
                '.',
            ],
        )

    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
self.assertSequenceEqual(lowercase_ , lowercase_ )
# Checks everything loads correctly in the same way
_lowerCamelCase : int =tokenizer_r.from_pretrained(lowercase_ )
_lowerCamelCase : List[Any] =tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_ ) )
shutil.rmtree(lowercase_ )
# Save tokenizer rust, legacy_format=False
_lowerCamelCase : str =tempfile.mkdtemp()
_lowerCamelCase : Optional[Any] =tokenizer_r.save_pretrained(lowercase_ , legacy_format=lowercase_ )
_lowerCamelCase : Any =tokenizer_p.save_pretrained(lowercase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_lowerCamelCase : str =tokenizer_r.from_pretrained(lowercase_ )
_lowerCamelCase : List[str] =tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_ ) )
shutil.rmtree(lowercase_ )
@cached_property
def lowerCamelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base' )
def lowerCamelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowercase_ , f.name )
_lowerCamelCase : Union[str, Any] =XLMRobertaTokenizer(f.name , keep_accents=lowercase_ )
_lowerCamelCase : Dict =pickle.dumps(lowercase_ )
pickle.loads(lowercase_ )
def lowerCamelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_lowerCamelCase : Any =self.get_tokenizer()
_lowerCamelCase : Optional[int] =self.get_rust_tokenizer()
_lowerCamelCase : Tuple ='I was born in 92000, and this is falsé.'
_lowerCamelCase : Any =tokenizer.tokenize(lowercase_ )
_lowerCamelCase : List[str] =rust_tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_lowerCamelCase : int =tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
_lowerCamelCase : Union[str, Any] =rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_lowerCamelCase : Dict =self.get_rust_tokenizer()
_lowerCamelCase : Optional[int] =tokenizer.encode(lowercase_ )
_lowerCamelCase : Optional[Any] =rust_tokenizer.encode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
@slow
def lowerCamelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase : Optional[Any] ='Hello World!'
_lowerCamelCase : Union[str, Any] =[0, 3_5378, 6661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@slow
def lowerCamelCase ( self : Any ) -> int:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] =(
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
_lowerCamelCase : List[str] =[
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
17_9459,
12_4850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
1_0114,
711,
152,
20,
6,
5,
2_2376,
642,
1221,
1_5190,
3_4153,
450,
5608,
959,
1119,
5_7702,
136,
186,
47,
1098,
2_9367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
5_0901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@slow
def lowerCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase : List[Any] ={'input_ids': [[0, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [0, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name='xlm-roberta-base' , revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3' , )
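# A hedged usage sketch for the tokenizer exercised above; the expected ids
# come from the slow test a few lines up:
#
#   from transformers import XLMRobertaTokenizer
#   tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
#   tok("Hello World!")["input_ids"]                               # [0, 35378, 6661, 38, 2]
#   tok.decode([0, 35378, 6661, 38, 2], skip_special_tokens=True)  # "Hello World!"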
| 464 | 1 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
"""simple docstring"""
_lowercase: Union[str, Any] = AutoTokenizer.from_pretrained(_lowerCamelCase )
_lowercase: Optional[int] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(_UpperCamelCase ):
# max_length=None => use the model max length (it's actually the default)
_lowercase: Optional[Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_lowerCamelCase , max_length=_lowerCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_lowercase: str = datasets.map(
_lowerCamelCase , batched=_lowerCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=_lowerCamelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_lowercase: List[str] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(_UpperCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowerCamelCase , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(_lowerCamelCase , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
_lowercase: Any = DataLoader(
tokenized_datasets['''train'''] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase )
_lowercase: int = DataLoader(
tokenized_datasets['''validation'''] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase )
return train_dataloader, eval_dataloader
def training_function(config, args):
"""simple docstring"""
_lowercase: Optional[int] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowercase: List[Any] = config["lr"]
_lowercase: Tuple = int(config['''num_epochs'''] )
_lowercase: List[Any] = int(config['''seed'''] )
_lowercase: Union[str, Any] = int(config['''batch_size'''] )
_lowercase: Optional[int] = args.model_name_or_path
set_seed(_lowerCamelCase )
_lowercase: str = get_dataloaders(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowercase: str = AutoModelForSequenceClassification.from_pretrained(_lowerCamelCase , return_dict=_lowerCamelCase )
# Instantiate optimizer
_lowercase: int = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_lowercase: Any = optimizer_cls(params=model.parameters() , lr=_lowerCamelCase )
if accelerator.state.deepspeed_plugin is not None:
_lowercase: Union[str, Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
_lowercase: int = 1
_lowercase: List[Any] = (len(_lowerCamelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_lowercase: Optional[Any] = get_linear_schedule_with_warmup(
optimizer=_lowerCamelCase , num_warmup_steps=0 , num_training_steps=_lowerCamelCase , )
else:
_lowercase: Tuple = DummyScheduler(_lowerCamelCase , total_num_steps=_lowerCamelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowercase: str = accelerator.prepare(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# We need to keep track of how many total steps we have iterated over
_lowercase: Optional[int] = 0
# We also need to keep track of the stating epoch so files are named properly
_lowercase: List[Any] = 0
# Now we train the model
_lowercase: Tuple = evaluate.load('''glue''' , '''mrpc''' )
_lowercase: Dict = 0
_lowercase: Optional[Any] = {}
for epoch in range(_lowerCamelCase , _lowerCamelCase ):
model.train()
for step, batch in enumerate(_lowerCamelCase ):
_lowercase: Optional[Any] = model(**_lowerCamelCase )
_lowercase: Optional[int] = outputs.loss
_lowercase: Tuple = loss / gradient_accumulation_steps
accelerator.backward(_lowerCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
_lowercase: str = 0
for step, batch in enumerate(_lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_lowercase: Optional[Any] = model(**_lowerCamelCase )
_lowercase: Dict = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
_lowercase: Optional[Any] = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(_lowerCamelCase ) - 1:
_lowercase: Tuple = predictions[: len(eval_dataloader.dataset ) - samples_seen]
_lowercase: Optional[Any] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=_lowerCamelCase , references=_lowerCamelCase , )
_lowercase: Any = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , _lowerCamelCase )
_lowercase: List[str] = eval_metric["accuracy"]
if best_performance < eval_metric["accuracy"]:
_lowercase: Union[str, Any] = eval_metric["accuracy"]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , '''all_results.json''' ) , '''w''' ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase )
def main():
    parser = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=_lowerCamelCase , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=_lowerCamelCase , )
parser.add_argument(
'''--output_dir''' , type=_lowerCamelCase , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
        '''--performance_lower_bound''' , type=_lowerCamelCase , default=_lowerCamelCase , help='''Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value.''' , )
parser.add_argument(
'''--num_epochs''' , type=_lowerCamelCase , default=3 , help='''Number of train epochs.''' , )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
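# How a script like this is typically launched (illustrative; the config file
# name is an assumption, while the flags are the ones defined by the parser above):
#
#   accelerate launch --config_file deepspeed_config.yaml this_script.py \
#       --model_name_or_path bert-base-cased --num_epochs 3 --output_dir ./out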
| 717 |
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = '<<<<<<< This should probably be modified because it mentions: '
HIGHLIGHT_MESSAGE_POST = '=======\n>>>>>>>\n'
TO_HIGHLIGHT = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r'tfds\.core', r'datasets'),
(r'tf\.io\.gfile\.GFile', r'open'),
(r'tf\.([\w\d]+)', r'datasets.Value(\'\1\')'),
(r'tfds\.features\.Text\(\)', r'datasets.Value(\'string\')'),
(r'tfds\.features\.Text\(', r'datasets.Value(\'string\'),'),
(r'features\s*=\s*tfds.features.FeaturesDict\(', r'features=datasets.Features('),
(r'tfds\.features\.FeaturesDict\(', r'dict('),
(r'The TensorFlow Datasets Authors', r'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(r'tfds\.', r'datasets.'),
(r'dl_manager\.manual_dir', r'self.config.data_dir'),
(r'self\.builder_config', r'self.config'),
]
def convert_command_factory(args: Namespace):
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class __magic_name__ ( SCREAMING_SNAKE_CASE__ ):
@staticmethod
def lowercase_ ( A_ ) -> Union[str, Any]:
"""simple docstring"""
        train_parser = parser.add_parser(
            '''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , )
train_parser.add_argument(
'''--tfds_path''' , type=A_ , required=A_ , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , )
train_parser.add_argument(
'''--datasets_directory''' , type=A_ , required=A_ , help='''Path to the HuggingFace Datasets folder.''' )
        train_parser.set_defaults(func=convert_command_factory )
def __init__( self , A_ , A_ , *A_ ) -> Tuple:
"""simple docstring"""
_lowercase: Optional[Any] = get_logger('''datasets-cli/converting''' )
_lowercase: Optional[Any] = tfds_path
_lowercase: Dict = datasets_directory
def lowercase_ ( self ) -> Any:
"""simple docstring"""
if os.path.isdir(self._tfds_path ):
_lowercase: Optional[Any] = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
_lowercase: Any = os.path.dirname(self._tfds_path )
else:
raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
_lowercase: Dict = os.path.abspath(self._datasets_directory )
self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
_lowercase: Tuple = []
_lowercase: List[str] = []
_lowercase: int = {}
if os.path.isdir(self._tfds_path ):
_lowercase: str = os.listdir(A_ )
else:
_lowercase: Tuple = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f'''Looking at file {f_name}''' )
_lowercase: Optional[int] = os.path.join(A_ , A_ )
_lowercase: Optional[Any] = os.path.join(A_ , A_ )
if not os.path.isfile(A_ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('''Skipping file''' )
continue
with open(A_ , encoding='''utf-8''' ) as f:
_lowercase: Any = f.readlines()
_lowercase: str = []
_lowercase: Dict = False
_lowercase: Any = False
_lowercase: Tuple = []
for line in lines:
_lowercase: str = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
_lowercase: Optional[Any] = '''import datasets\n'''
elif "import tensorflow" in out_line:
# order is important here
_lowercase: Any = ''''''
continue
elif "from absl import logging" in out_line:
_lowercase: Dict = '''from datasets import logging\n'''
elif "getLogger" in out_line:
_lowercase: Optional[Any] = out_line.replace('''getLogger''' , '''get_logger''' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
_lowercase: int = True
_lowercase: List[str] = list(filter(lambda A_ : e in out_line , A_ ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(A_ ) + '''\n''' )
out_lines.append(A_ )
out_lines.append(A_ )
continue
else:
for pattern, replacement in TO_CONVERT:
_lowercase: Tuple = re.sub(A_ , A_ , A_ )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
_lowercase: Optional[int] = re.match(R'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , A_ )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
_lowercase: Dict = '''from . import ''' + match.group(1 )
                    # Check we have not forgotten anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f'''Error converting {out_line.strip()}''' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
_lowercase: Tuple = True
out_lines.append(A_ )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
_lowercase: List[str] = f_name.replace('''.py''' , '''''' )
_lowercase: Dict = os.path.join(A_ , A_ )
_lowercase: Dict = os.path.join(A_ , A_ )
os.makedirs(A_ , exist_ok=A_ )
self._logger.info(f'''Adding directory {output_dir}''' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(A_ )
if needs_manual_update:
with_manual_update.append(A_ )
with open(A_ , '''w''' , encoding='''utf-8''' ) as f:
f.writelines(A_ )
self._logger.info(f'''Converted in {output_file}''' )
for utils_file in utils_files:
try:
_lowercase: Optional[int] = os.path.basename(A_ )
_lowercase: List[Any] = imports_to_builder_map[f_name.replace('''.py''' , '''''' )]
self._logger.info(f'''Moving {dest_folder} to {utils_file}''' )
shutil.copy(A_ , A_ )
except KeyError:
self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
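# An illustrative invocation of the command implemented above (both paths are
# placeholders):
#
#   datasets-cli convert --tfds_path ./tfds_datasets/my_dataset.py \
#       --datasets_directory ./hf_datasets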
| 272 | 0 |
'''simple docstring'''
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
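# A hedged, non-interactive check of the worked example above (not part of the
# original file): encode the two edges directly and verify the expected matrix.
def _floyd_warshall_demo() -> None:
    inf = float("inf")
    demo_graph = [
        [0.0, inf, inf],
        [inf, 0.0, 2.0],
        [inf, 1.0, 0.0],
    ]
    dist, _ = floyd_warshall(demo_graph, 3)
    assert dist[1][2] == 2.0  # row "INF 0 2" above
    assert dist[2][1] == 1.0  # row "INF 1 0" above
    assert dist[0][1] == inf  # vertex 0 stays disconnected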
| 384 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
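# A hedged construction example for the config above (all values illustrative):
#
#   config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes validation
#   LlamaConfig(rope_scaling={"type": "bad", "factor": 2.0})              # raises ValueError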
| 384 | 1 |
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant_of_integration: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant_of_integration
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
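if __name__ == "__main__":
    # A hedged demo, not part of the original module: p(x) = 3x^2 + 2x + 1.
    p = Polynomial(2, [1, 2, 3])
    print(p)                # 3x^2 + 2x + 1
    print(p.evaluate(2))    # 1 + 2*2 + 3*4 = 17
    print(p.derivative())   # 6x + 2
    print(p.integral(0.0))  # 1.0x^3 + 1.0x^2 + 1.0x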
| 718 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
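# A hedged usage sketch for the re-exported pipeline; the checkpoint id and
# prompt are illustrative, not guaranteed by this module:
#
#   import torch
#   from diffusers import UnCLIPPipeline
#   pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha", torch_dtype=torch.float16)
#   image = pipe("a photo of an astronaut riding a horse").images[0]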
| 693 | 0 |
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCAmelCase_ :
__UpperCAmelCase = field(
default=snake_case_ , metadata={
'help': (
'The model checkpoint for weights initialization. Leave None if you want to train a model from'
' scratch.'
)
} , )
__UpperCAmelCase = field(
default=snake_case_ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(snake_case_ )} , )
__UpperCAmelCase = field(
default=snake_case_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__UpperCAmelCase = field(
default=snake_case_ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__UpperCAmelCase = field(
default=snake_case_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class lowerCAmelCase_ :
__UpperCAmelCase = field(
default=snake_case_ , metadata={'help': 'The input training data file (a text file).'} )
__UpperCAmelCase = field(
default=snake_case_ , metadata={
'help': (
'The input training data files (multiple files in glob format). '
'Very often splitting large files to smaller files can prevent tokenizer going out of memory'
)
} , )
__UpperCAmelCase = field(
default=snake_case_ , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
__UpperCAmelCase = field(
default=snake_case_ , metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'} , )
__UpperCAmelCase = field(
default=snake_case_ , metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'} , )
__UpperCAmelCase = field(
default=snake_case_ , metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'} , )
__UpperCAmelCase = field(
default=snake_case_ , metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'} )
    __UpperCAmelCase = field(default=snake_case_ , metadata={'help': 'Whether or not to use whole word mask.'} )
__UpperCAmelCase = field(
default=0.1_5 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
__UpperCAmelCase = field(
default=1 / 6 , metadata={
'help': (
'Ratio of length of a span of masked tokens to surrounding context length for permutation language'
' modeling.'
)
} , )
__UpperCAmelCase = field(
default=5 , metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'} )
__UpperCAmelCase = field(
default=-1 , metadata={
'help': (
'Optional input sequence length after tokenization.'
'The training dataset will be truncated in block of this size for training.'
'Default to the model max input length for single sentence inputs (take into account special tokens).'
)
} , )
__UpperCAmelCase = field(
default=snake_case_ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def get_dataset(args, tokenizer, evaluate=False, cache_dir=None):
    def _dataset(file_path, ref_path=None):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
                    raise ValueError('''You need to set whole word masking and mlm to True for Chinese Whole Word Mask''' )
return LineByLineWithRefDataset(
tokenizer=_UpperCAmelCase , file_path=_UpperCAmelCase , block_size=args.block_size , ref_path=_UpperCAmelCase , )
return LineByLineTextDataset(tokenizer=_UpperCAmelCase , file_path=_UpperCAmelCase , block_size=args.block_size )
else:
return TextDataset(
tokenizer=_UpperCAmelCase , file_path=_UpperCAmelCase , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=_UpperCAmelCase , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(_UpperCAmelCase ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case : Any =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
snake_case , snake_case , snake_case : Dict =parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , _UpperCAmelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
snake_case : Optional[int] =AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
snake_case : Any =AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
snake_case : List[str] =CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.tokenizer_name:
snake_case : int =AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
snake_case : Any =AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
''' script, save it,and load it from here, using --tokenizer_name''' )
if model_args.model_name_or_path:
snake_case : Any =AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , )
else:
logger.info('''Training new model from scratch''' )
snake_case : int =AutoModelWithLMHead.from_config(_UpperCAmelCase )
model.resize_token_embeddings(len(_UpperCAmelCase ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
'''--mlm flag (masked language modeling).''' )
if data_args.block_size <= 0:
snake_case : Tuple =tokenizer.max_len
# Our input block size will be the max possible for the model
else:
snake_case : Dict =min(data_args.block_size , tokenizer.max_len )
# Get datasets
snake_case : Tuple =(
get_dataset(_UpperCAmelCase , tokenizer=_UpperCAmelCase , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
snake_case : Dict =(
get_dataset(_UpperCAmelCase , tokenizer=_UpperCAmelCase , evaluate=_UpperCAmelCase , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
snake_case : List[str] =DataCollatorForPermutationLanguageModeling(
tokenizer=_UpperCAmelCase , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
snake_case : Union[str, Any] =DataCollatorForWholeWordMask(
tokenizer=_UpperCAmelCase , mlm_probability=data_args.mlm_probability )
else:
snake_case : Dict =DataCollatorForLanguageModeling(
tokenizer=_UpperCAmelCase , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
snake_case : Union[str, Any] =Trainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , data_collator=_UpperCAmelCase , train_dataset=_UpperCAmelCase , eval_dataset=_UpperCAmelCase , prediction_loss_only=_UpperCAmelCase , )
# Training
if training_args.do_train:
snake_case : Tuple =(
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=_UpperCAmelCase )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
snake_case : List[Any] ={}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
snake_case : Union[str, Any] =trainer.evaluate()
snake_case : int =math.exp(eval_output['''eval_loss'''] )
snake_case : Any ={'''perplexity''': perplexity}
snake_case : int =os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
if trainer.is_world_master():
with open(_UpperCAmelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , _UpperCAmelCase , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
results.update(_UpperCAmelCase )
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
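# An illustrative launch (file names are placeholders; the flags come from the
# argument dataclasses above):
#
#   python run_language_modeling.py --model_name_or_path roberta-base --mlm \
#       --train_data_file train.txt --eval_data_file eval.txt \
#       --do_train --do_eval --output_dir ./lm_out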
| 349 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class lowercase__ ( snake_case_ ):
'''simple docstring'''
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_INIT_CONFIGURATION
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = SqueezeBertTokenizer
def __init__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=True , lowerCamelCase__="[UNK]" , lowerCamelCase__="[SEP]" , lowerCamelCase__="[PAD]" , lowerCamelCase__="[CLS]" , lowerCamelCase__="[MASK]" , lowerCamelCase__=True , lowerCamelCase__=None , **lowerCamelCase__ , ):
'''simple docstring'''
super().__init__(
lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , do_lower_case=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , tokenize_chinese_chars=lowerCamelCase__ , strip_accents=lowerCamelCase__ , **lowerCamelCase__ , )
UpperCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , lowerCamelCase__ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , lowerCamelCase__ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , lowerCamelCase__ ) != tokenize_chinese_chars
):
UpperCamelCase = getattr(lowerCamelCase__ , normalizer_state.pop('''type''' ) )
UpperCamelCase = do_lower_case
UpperCamelCase = strip_accents
UpperCamelCase = tokenize_chinese_chars
UpperCamelCase = normalizer_class(**lowerCamelCase__ )
UpperCamelCase = do_lower_case
def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__=None ):
'''simple docstring'''
UpperCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
'''simple docstring'''
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
'''simple docstring'''
UpperCamelCase = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
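# A hedged usage sketch; upstream this class is SqueezeBertTokenizerFast, and
# the checkpoint id is one of the real entries in the maps above:
#
#   tok = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#   enc = tok("Hello world", "How are you?")
#   # the token-type method above yields 0s for the first segment (plus the
#   # trailing [SEP]) and 1s for the second segment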
| 212 | 0 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model from the json config
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f'''Building PyTorch model from configuration: {config}''' )
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
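# An illustrative invocation (the script name and all paths are placeholders):
#
#   python convert_mobilebert_tf_checkpoint.py \
#       --tf_checkpoint_path ./mobilebert/mobilebert_model.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./pytorch_model.bin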
| 709 |
import random
def _partition(data: list, pivot) -> tuple:
    """Split data into elements less than, equal to and greater than pivot."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the index-th smallest element of items in expected O(n) time."""
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
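if __name__ == "__main__":
    # A hedged demo (not in the original file): the median of an odd-length
    # list is the element at index len(items) // 2.
    items = [7, 1, 5, 3, 9]
    print(quick_select(items, 0))                # 1 (minimum)
    print(quick_select(items, len(items) // 2))  # 5 (median)
    print(quick_select(items, len(items)))       # None (index out of range)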
| 541 | 0 |
'''simple docstring'''
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to the given number of digits using the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError('''Undefined for non-integers''' )
    elif precision < 1:
        raise ValueError('''Undefined for non-natural numbers''' )

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)  # each series term adds ~14 digits
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(F'''The first {n} digits of pi is: {pi(n)}''')
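    # A hedged sanity check (not in the original file): the leading digits of pi.
    assert pi(15).startswith("3.14159265358")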
| 22 |
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    # windows of len(qs) over ks, checking for all matches
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
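# A hedged usage sketch (the parameter tree below is illustrative):
#
#   import jax.numpy as jnp
#   params = unflatten_dict({
#       ("transformer", "wte", "embedding"): jnp.zeros((50257, 768)),
#       ("ln_f", "scale"): jnp.ones(768),
#   })
#   set_partitions(params)  # wte -> PartitionSpec('mp', None); ln_f -> None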
| 427 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
"REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RemBertForCausalLM",
"RemBertForMaskedLM",
"RemBertForMultipleChoice",
"RemBertForQuestionAnswering",
"RemBertForSequenceClassification",
"RemBertForTokenClassification",
"RemBertLayer",
"RemBertModel",
"RemBertPreTrainedModel",
"load_tf_weights_in_rembert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
"TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRemBertForCausalLM",
"TFRemBertForMaskedLM",
"TFRemBertForMultipleChoice",
"TFRemBertForQuestionAnswering",
"TFRemBertForSequenceClassification",
"TFRemBertForTokenClassification",
"TFRemBertLayer",
"TFRemBertModel",
"TFRemBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 721 |
import flax.linen as nn
import jax
import jax.numpy as jnp


class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
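# Minimal usage sketch (channel sizes and shapes below are illustrative assumptions,
# not taken from the original file):
if __name__ == "__main__":
    rng = jax.random.PRNGKey(0)
    block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
    sample = jnp.zeros((1, 16, 16, 32))  # NHWC: these Flax blocks expect channels-last
    temb = jnp.zeros((1, 128))  # time-step embedding fed through time_emb_proj
    params = block.init(rng, sample, temb)
    out = block.apply(params, sample, temb)
    print(out.shape)  # (1, 16, 16, 64)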
| 245 | 0 |
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}


class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
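# Illustrative check (not in the original file): with the defaults above, the derived
# channel dimension is embed_dim * 2 ** (len(depths) - 1) = 96 * 8 = 768, and stage_names
# is ["stem", "stage1", "stage2", "stage3", "stage4"]; e.g. SwinConfig().hidden_size == 768.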
class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 691 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"


class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()

        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"

    @slow
    @require_torch_gpu
    def test_model_download(self):
        """Warm up the cache so the next test does not include download time, which varies between machines."""
        MarianMTModel.from_pretrained(MARIAN_MODEL)
@slow
@require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            "$MAX_LEN": 64,
            "$BS": 64,
            "$GAS": 1,
            "$ENRO_DIR": self.data_dir,
            "facebook/mbart-large-cc25": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            "--learning_rate=3e-5": "--learning_rate 3e-4",
            "--num_train_epochs 6": "--num_train_epochs 1",
        }
        # Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()

        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"""
            --output_dir {output_dir}
            --tokenizer_name Helsinki-NLP/opus-mt-en-ro
            --sortish_sampler
            --do_predict
            --gpus 1
            --freeze_encoder
            --n_train 40000
            --n_val 500
            --n_test 500
            --fp16_opt_level O1
            --num_sanity_val_steps 0
            --eval_beams 2
        """.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)
        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)
        self.assertGreater(last_step_stats["val_avg_gen_time"], 0.01)
        # model hanging on generate. Maybe bad config was saved.
        self.assertLessEqual(last_step_stats["val_avg_gen_time"], 1.0)

        # test learning requirements:
        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"], 2)
        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats["val_avg_bleu"], 17)
        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"]), 1.1)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            "--fp16_opt_level=O1": "",
            "$MAX_LEN": 128,
            "$BS": 16,
            "$GAS": 1,
            "$ENRO_DIR": data_dir,
            "$m": "sshleifer/student_marian_en_ro_6_1",
            "val_check_interval=0.25": "val_check_interval=1.0",
        }

        # Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")

        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
            ["distillation.py"]
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                "--gpus=1",
                "--learning_rate=1e-3",
                f"--num_train_epochs={epochs}",
                "--warmup_steps=10",
                "--val_check_interval=1.0",
                "--do_predict",
            ]
        )
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        assert len(metrics["val"]) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check

        assert last_step_stats["val_avg_gen_time"] >= 0.01

        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
| 691 | 1 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
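# Usage sketch for the custom doctest flag above (illustrative): appending
# `# doctest: +IGNORE_RESULT` to a line makes CustomOutputChecker accept any output,
# which is useful for nondeterministic values, e.g.
#
#   >>> import random
#   >>> random.random()  # doctest: +IGNORE_RESULT
#   0.123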
| 84 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
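        # Worked example with the defaults above (illustrative): frequency_out_dimension
        # = (16 - 2) // 2 + 1 = 8 and time_out_dimension = (24 - 2) // 2 + 1 = 12, so
        # num_patches = 96 and seq_length = 98.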
    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False
    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )

    audio, sampling_rate = torchaudio.load(filepath)

    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )
    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        feature_extractor = self.default_feature_extractor
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 84 | 1 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n    Examples:\n        ```py\n        >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n        >>> from diffusers.utils import load_image\n        >>> import torch\n\n        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n        ... )\n        >>> pipe_prior.to("cuda")\n\n        >>> prompt = "A red cartoon frog, 4k"\n        >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n        >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n        ...     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n        ... )\n        >>> pipe.to("cuda")\n\n        >>> init_image = load_image(\n        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n        ...     "/kandinsky/frog.png"\n        ... )\n\n        >>> image = pipe(\n        ...     image=init_image,\n        ...     image_embeds=image_emb,\n        ...     negative_image_embeds=zero_image_emb,\n        ...     height=768,\n        ...     width=768,\n        ...     num_inference_steps=100,\n        ...     strength=0.2,\n        ... ).images\n\n        >>> image[0].save("red_frog.png")\n        ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
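# Worked example (hypothetical values): downscale_height_and_width(768, 768) gives
# 768 // 64 = 12 with no remainder on each side, hence (12 * 8, 12 * 8) = (96, 96) --
# the latent resolution that the MoVQ decoder scales back up to 768 x 768.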
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 476 |
from math import ceil


def solution(n: int = 1001) -> int:
    total = 1

    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even

    return total
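# Worked example (Project Euler 28, illustrative): for a 5x5 spiral the diagonal
# values are 1, 3, 5, 7, 9, 13, 17, 21, 25, so solution(5) returns 101.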
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
__lowercase : Any = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number')
| 476 | 1 |
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
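# Minimal usage sketch (assumes the sibling `hash_table.HashTable` API, i.e. a
# HashTable(size_table, charge_factor=None, ...) constructor with an insert_data()
# method -- an assumption for illustration only):
#
#   table = HashTableWithLinkedList(3, charge_factor=2)
#   for value in (10, 20, 30, 40):
#       table.insert_data(value)  # colliding values accumulate in per-slot deques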
| 365 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
    "word_embeddings_layernorm.weight",
    "word_embeddings_layernorm.bias",
    "input_layernorm.weight",
    "input_layernorm.bias",
    "post_attention_layernorm.weight",
    "post_attention_layernorm.bias",
    "self_attention.dense.bias",
    "mlp.dense_4h_to_h.bias",
    "ln_f.weight",
    "ln_f.bias",
]

WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
    "mlp.dense_4h_to_h.weight",
    "self_attention.dense.weight",
]
def layer_name_mapping(key, file):
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
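# Worked examples (illustrative): get_dtype_size(torch.float16) parses the trailing
# "16" out of "torch.float16" and returns 16 // 8 = 2 bytes; torch.bool is
# special-cased to 1 / 8 byte.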
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
"""simple docstring"""
if bloom_config_file == "":
lowerCAmelCase__ = BloomConfig()
else:
lowerCAmelCase__ = BloomConfig.from_json_file(UpperCamelCase_ )
if shard_model:
lowerCAmelCase__ = os.listdir(UpperCamelCase_ )
lowerCAmelCase__ = sorted(filter(lambda UpperCamelCase_ : s.startswith('layer' ) and "model_00" in s , UpperCamelCase_ ) )
lowerCAmelCase__ = {'weight_map': {}, 'metadata': {}}
lowerCAmelCase__ = 0
lowerCAmelCase__ = None
lowerCAmelCase__ = BloomConfig()
for j, file in enumerate(UpperCamelCase_ ):
print('Processing file: {}'.format(UpperCamelCase_ ) )
lowerCAmelCase__ = None
for i in range(UpperCamelCase_ ):
# load all TP files
lowerCAmelCase__ = file.replace('model_00' , F"model_0{i}" )
lowerCAmelCase__ = torch.load(os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , map_location='cpu' )
# Rename keys in the transformers names
lowerCAmelCase__ = list(temp.keys() )
for key in keys:
lowerCAmelCase__ = temp.pop(UpperCamelCase_ )
if tensors is None:
lowerCAmelCase__ = temp
else:
for key in tensors.keys():
if any(key.endswith(UpperCamelCase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
lowerCAmelCase__ = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
lowerCAmelCase__ = torch.cat([tensors[key], temp[key]] , dim=UpperCamelCase_ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(UpperCamelCase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
lowerCAmelCase__ = tensors[key] / pretraining_tp
torch.save(
UpperCamelCase_ , os.path.join(
UpperCamelCase_ , 'pytorch_model_{}-of-{}.bin'.format(str(j + 1 ).zfill(5 ) , str(len(UpperCamelCase_ ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
lowerCAmelCase__ = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
lowerCAmelCase__ = 'pytorch_model_{}-of-{}.bin'.format(
str(j + 1 ).zfill(5 ) , str(len(UpperCamelCase_ ) ).zfill(5 ) )
lowerCAmelCase__ = BloomConfig()
lowerCAmelCase__ = pytorch_dump_folder_path + '/' + CONFIG_NAME
lowerCAmelCase__ = total_size
with open(UpperCamelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(UpperCamelCase_ , WEIGHTS_NAME + '.index.json' ) , 'w' , encoding='utf-8' ) as f:
lowerCAmelCase__ = json.dumps(UpperCamelCase_ , indent=2 , sort_keys=UpperCamelCase_ ) + '\n'
f.write(UpperCamelCase_ )
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))

        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
__snake_case : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bloom_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path to the Megatron-LM checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--bloom_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--shard_model""",
action="""store_true""",
help="""An optional setting to shard the output model \nThis enables sharding the converted checkpoint""",
)
parser.add_argument(
"""--pretraining_tp""",
default=4,
type=int,
help="""Pretraining TP rank that has been used when training the model in Megatron-LM \n""",
)
__snake_case : str = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 365 | 1 |
# fmt: off
MORSE_CODE_DICT = {
    "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
    "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
    "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
    "V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
    "2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
    "8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
    ":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", '"': ".-..-.",
    "?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
    "(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
}  # Exclamation mark is not in ITU-R recommendation
# fmt: on

REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())
def main() -> None:
    message = "Morse code here!"
    print(message)

    message = encrypt(message)
    print(message)

    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
| 529 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
a__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
a__ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["polics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")

        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")

        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )

        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
@require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"]
        )
@require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )
@require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="tf",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )
@slow
@require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
self.assertEqual(
            nested_simplify(outputs) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=UpperCamelCase__ , )
self.assertEqual(
            nested_simplify(outputs) , {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
self.assertEqual(
            nested_simplify(outputs) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=UpperCamelCase__ , )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.817, 0.713, 0.018, 0.018],
} , )
| 529 | 1 |
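# A minimal standalone sketch of the zero-shot pipeline exercised by the tests
# above, via the public transformers API (the NLI checkpoint is illustrative;
# any MNLI-style model works).
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
result = classifier(
    "Who are you voting for in 2020?",
    candidate_labels=["politics", "public health", "science"],
)
# result["labels"] is sorted by descending score, so "politics" comes first here.
print(result["labels"][0], result["scores"][0])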
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
'''The `image_to_image.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionImg2ImgPipeline` instead.'''
) | 161 |
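# A minimal sketch of the replacement usage the deprecation warning points to,
# assuming a Stable Diffusion checkpoint and an input image are available
# (the checkpoint name and file paths are illustrative).
import torch
from PIL import Image

from diffusers import StableDiffusionImg2ImgPipeline

pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
init_image = Image.open("sketch.png").convert("RGB").resize((768, 512))
result = pipe(prompt="A fantasy landscape", image=init_image, strength=0.75)
result.images[0].save("fantasy_landscape.png")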
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'''
),
'''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''',
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'''
),
'''google/electra-base-generator''': (
'''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'''
),
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/electra-small-generator''': 512,
'''google/electra-base-generator''': 512,
'''google/electra-large-generator''': 512,
'''google/electra-small-discriminator''': 512,
'''google/electra-base-discriminator''': 512,
'''google/electra-large-discriminator''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/electra-small-generator''': {'''do_lower_case''': True},
'''google/electra-base-generator''': {'''do_lower_case''': True},
'''google/electra-large-generator''': {'''do_lower_case''': True},
'''google/electra-small-discriminator''': {'''do_lower_case''': True},
'''google/electra-base-discriminator''': {'''do_lower_case''': True},
'''google/electra-large-discriminator''': {'''do_lower_case''': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]",
        pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files) | 161 | 1 |
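# A short usage sketch of the fast tokenizer defined above, through the public
# transformers API (the checkpoint name matches the pretrained maps above).
from transformers import ElectraTokenizerFast

tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
enc = tokenizer("Hello world!", return_tensors="pt")
# [CLS]/[SEP] are added by build_inputs_with_special_tokens; token_type_ids are
# all 0 for a single sequence, per create_token_type_ids_from_sequences above.
print(enc["input_ids"], enc["token_type_ids"])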
"""simple docstring"""
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_00_00_00))
| 273 |
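# The function scans every denominator d <= limit and keeps the largest fraction
# strictly below numerator/denominator (Project Euler 71 style). A small
# hand-checkable case, assuming the reconstructed `solution` above: for limit 8
# the best candidate below 3/7 ~= 0.4286 is 2/5 = 0.4, so the numerator is 2.
assert solution(numerator=3, denominator=7, limit=8) == 2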
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''module.blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''module.blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''module.blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''module.blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''module.blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''module.blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''module.blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''module.blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''module.blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''module.blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
a_ : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """simple docstring"""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''module.blocks.{i}.attn.qkv.weight''')
        in_proj_bias = state_dict.pop(f'''module.blocks.{i}.attn.qkv.bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """simple docstring"""
    ignore_keys = ["""head.weight""", """head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    """simple docstring"""
    ignore_keys = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """simple docstring"""
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="""cpu""")["""target_encoder"""]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="""pt""")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1E-4)

    print(f'''Saving model to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)

    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 577 | 0 |
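# The script is normally driven through argparse, but the reconstructed entry
# point above can also be called directly (the URL is the argparse default; the
# output directory is illustrative).
convert_vit_msn_checkpoint(
    checkpoint_url="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
    pytorch_dump_folder_path="./vit-msn-small",
)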
"""simple docstring"""
import heapq
import sys
import numpy as np
TPos = tuple[int, int]


class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
def consistent_heuristic(P, goal):
    # euclidean distance
    a = np.array(P)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_1(P, goal):
    # integer division by time variable
    return consistent_heuristic(P, goal) // t


def heuristic_2(P, goal):
    # manhattan distance
    return abs(P[0] - goal[0]) + abs(P[1] - goal[1])


def key(start, i, goal, g_function):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function)
                                )
def make_common_ground():
    some_list = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(1_5 , 2_0 ):
some_list.append((x, 1_7) )
for x in range(1_0 , 1_9 ):
for y in range(1 , 1_5 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(1_2 , 1_9 ):
some_list.append((x, y) )
for x in range(3 , 1_3 ):
for y in range(1_6 , 1_9 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}
blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()


blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start, goal, n_heuristic):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s, i, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s, 0, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer,
                        )
                        close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
if (j, i) in blocks:
print("#" , end=" " )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("*" , end=" " )
else:
print("-" , end=" " )
else:
print("*" , end=" " )
if (j, i) == (n - 1, n - 1):
print("<-- End position" , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
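# The PriorityQueue above updates an item's priority when it is re-inserted,
# which the search relies on. A tiny standalone check of that behaviour,
# assuming the reconstructed class above:
q = PriorityQueue()
q.put((0, 0), 5.0)
q.put((1, 1), 3.0)
q.put((0, 0), 1.0)  # re-inserting (0, 0) lowers its priority to 1.0
print(q.get())  # -> (1.0, (0, 0)): the updated entry now comes out first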
| 67 | """simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = """blenderbot-small"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}

    def __init__(
        self, vocab_size=50265, max_position_embeddings=512, encoder_layers=8, encoder_ffn_dim=2048,
        encoder_attention_heads=16, decoder_layers=8, decoder_ffn_dim=2048, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True,
        activation_function="gelu", d_model=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0,
        init_std=0.02, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1,
        eos_token_id=2, forced_eos_token_id=2, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id, **kwargs,
        )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Generate decoder inputs
__a = seq_length if not self.use_past else 1
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
__a = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
__a = dict(**lowerCamelCase , **lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__a , __a = common_inputs["input_ids"].shape
__a = common_inputs["decoder_input_ids"].shape[1]
__a , __a = self.num_attention_heads
__a = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__a = decoder_seq_length + 3
__a = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__a = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(lowerCamelCase , lowerCamelCase )] , dim=1 )
__a = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__a , __a = self.num_layers
__a = min(lowerCamelCase , lowerCamelCase )
__a = max(lowerCamelCase , lowerCamelCase ) - min_num_layers
__a = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(lowerCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
) )
# TODO: test this.
__a = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(lowerCamelCase , lowerCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) )
return common_inputs
    def _generate_dummy_inputs_for_causal_lm(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__a , __a = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
__a = seqlen + 2
__a , __a = self.num_layers
__a , __a = self.num_attention_heads
__a = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__a = common_inputs["attention_mask"].dtype
__a = torch.cat(
[common_inputs["attention_mask"], torch.ones(lowerCamelCase , lowerCamelCase , dtype=lowerCamelCase )] , dim=1 )
__a = [
(torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) for _ in range(lowerCamelCase )
]
return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__a = compute_effective_axis_dimension(
lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__a = tokenizer.num_special_tokens_to_add(lowerCamelCase )
__a = compute_effective_axis_dimension(
lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
__a = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
__a = dict(tokenizer(lowerCamelCase , return_tensors=lowerCamelCase ) )
return common_inputs
    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
| 67 | 1 |
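# A short sketch instantiating the configuration defined above through the
# public transformers API (the printed values are the defaults shown above).
from transformers import BlenderbotSmallConfig

config = BlenderbotSmallConfig()
# attribute_map routes num_attention_heads -> encoder_attention_heads (16)
print(config.d_model, config.encoder_layers, config.num_attention_heads)  # 512 8 16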
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = """backbone.""" if is_semantic else """"""

    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'{prefix}blocks.{i}.norm1.weight', f'beit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'{prefix}blocks.{i}.norm1.bias', f'beit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'{prefix}blocks.{i}.attn.proj.weight', f'beit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'{prefix}blocks.{i}.attn.proj.bias', f'beit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'{prefix}blocks.{i}.norm2.weight', f'beit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'{prefix}blocks.{i}.norm2.bias', f'beit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc1.weight', f'beit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc1.bias', f'beit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc2.weight', f'beit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc2.bias', f'beit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'{prefix}cls_token', """beit.embeddings.cls_token"""),
(f'{prefix}patch_embed.proj.weight', """beit.embeddings.patch_embeddings.projection.weight"""),
(f'{prefix}patch_embed.proj.bias', """beit.embeddings.patch_embeddings.projection.bias"""),
(f'{prefix}pos_embed', """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = """backbone.""" if is_semantic else """"""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f'{prefix}blocks.{i}.attn.qkv.weight')
        q_bias = state_dict.pop(f'{prefix}blocks.{i}.attn.q_bias')
        v_bias = state_dict.pop(f'{prefix}blocks.{i}.attn.v_bias')

        state_dict[f'beit.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'beit.encoder.layer.{i}.attention.attention.query.bias'] = q_bias
        state_dict[f'beit.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'beit.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'beit.encoder.layer.{i}.attention.attention.value.bias'] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f'{prefix}blocks.{i}.gamma_1')
        gamma_2 = state_dict.pop(f'{prefix}blocks.{i}.gamma_2')
        state_dict[f'beit.encoder.layer.{i}.lambda_1'] = gamma_1
        state_dict[f'beit.encoder.layer.{i}.lambda_2'] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    has_lm_head = False if """rvlcdip""" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = """huggingface/label-files"""
        filename = """rvlcdip-id2label.json"""
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="""cpu""")["""model"""]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="""pt""")
    pixel_values = encoding["""pixel_values"""]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)

    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
        else:
            model_name = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="""nielsr""", commit_message="""Add image processor""", use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="""nielsr""", commit_message="""Add model""", use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth',
type=str,
help='URL to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 281 |
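# As with the ViT-MSN script earlier, the reconstructed entry point above can be
# called directly from Python (the URL is the argparse default; the output path
# is illustrative).
convert_dit_checkpoint(
    checkpoint_url="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
    pytorch_dump_folder_path="./dit-base",
    push_to_hub=False,
)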
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 281 | 1 |
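# A minimal sketch of using the re-exported text-to-video pipeline (the
# checkpoint name is illustrative; generation settings are kept small).
import torch

from diffusers import TextToVideoSDPipeline
from diffusers.utils import export_to_video

pipe = TextToVideoSDPipeline.from_pretrained(
    "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16
).to("cuda")
frames = pipe("an astronaut riding a horse", num_inference_steps=25).frames
export_to_video(frames, "astronaut.mp4")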
def naive_pattern_search(s: str, pattern: str) -> list[int]:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
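    # The search is worst-case O(len(s) * len(pattern)) and reports overlapping
    # matches, as this extra check shows:
    assert naive_pattern_search("AAAA", "AA") == [0, 1, 2]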
| 707 | import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = '<pad>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-1], '<mask>')
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]],
        )

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A : List[Any] =self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
A : Dict =self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
A : str =tempfile.mkdtemp()
A : Optional[int] =tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
A : List[str] =tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Checks everything loads correctly in the same way
A : Tuple =tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : Dict =tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
# Save tokenizer rust, legacy_format=True
A : Optional[int] =tempfile.mkdtemp()
A : Optional[int] =tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE__ , legacy_format=SCREAMING_SNAKE_CASE__ )
A : List[Any] =tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Checks it save with the same files
self.assertSequenceEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Checks everything loads correctly in the same way
A : Tuple =tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : Tuple =tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
# Save tokenizer rust, legacy_format=False
A : List[Any] =tempfile.mkdtemp()
A : Optional[int] =tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE__ , legacy_format=SCREAMING_SNAKE_CASE__ )
A : str =tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
A : List[Any] =tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : List[Any] =tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base')
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = 'I was born in 92000, and this is falsé.'

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = 'Hello World!'
        original_tokenizer_encodings = [0, 3_53_78, 66_61, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
        original_tokenizer_encodings = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Any:
# fmt: off
A : List[Any] ={'input_ids': [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE__ , model_name='xlm-roberta-base' , revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3' , )
| 661 | 0 |
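# A compact sketch of the behaviour these tests pin down, using the public
# checkpoint (the expected ids come from the slow test above).
from transformers import XLMRobertaTokenizer

tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
print(tokenizer.encode("Hello World!"))  # [0, 35378, 6661, 38, 2]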
'''simple docstring'''
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_full_tokenizer_moses(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
"Hello",
"(",
"bracket",
")",
"and",
"side",
"@-@",
"scrolled",
"[",
"and",
"]",
"Henry",
"'s",
"$",
"5",
"@,@",
"000",
"with",
"3",
"@.@",
"34",
"m",
".",
"What",
"'s",
"up",
"!",
"?",
]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)
    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
| 692 |
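# The moses-style number handling tested above can be seen directly on the
# public checkpoint (the name is illustrative; the expected tokens match the
# moses test above).
from transformers import TransfoXLTokenizer

tokenizer = TransfoXLTokenizer.from_pretrained("transfo-xl-wt103")
print(tokenizer.tokenize("$5,000 with 3.34 m."))
# -> ['$', '5', '@,@', '000', 'with', '3', '@.@', '34', 'm', '.']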
'''simple docstring'''
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    """simple docstring"""

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f'{metric_key_prefix}_'):
                    metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
def UpperCamelCase__ ( self : Tuple , __a : Dict , __a : Optional[Any] , __a : Optional[Any]=None , __a : str = "test" ):
_a = self.get_test_dataloader(__a )
# Temporarily disable metric computation, we will do it in the loop here.
_a = self.compute_metrics
_a = None
_a = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
_a = time.time()
try:
_a = eval_loop(
__a , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__a , metric_key_prefix=__a , )
finally:
_a = compute_metrics
_a = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
__a , __a , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
_a = self.post_process_function(__a , __a , output.predictions , "predict" )
_a = self.compute_metrics(__a )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
_a = metrics.pop(__a )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__a )
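# A minimal wiring sketch (illustrative only; `post_processing_function`, `model`, and the
# datasets below are hypothetical placeholders, not part of this module):
#
#     def post_processing_function(examples, features, predictions, stage="eval"):
#         ...  # map start/end logits back to answer spans, return an EvalPrediction-like object
#
#     trainer = QuestionAnsweringTrainer(
#         model=model,
#         args=training_args,
#         train_dataset=train_dataset,
#         eval_dataset=eval_dataset,
#         eval_examples=eval_examples,
#         post_process_function=post_processing_function,
#         compute_metrics=compute_metrics,
#     )
#     metrics = trainer.evaluate()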
| 692 | 1 |
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_DESCRIPTION = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_KWARGS_DESCRIPTION = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
| 686 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1000,
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)

        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)

        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
| 686 | 1 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    """Build a `FocalNetConfig` matching the variant encoded in `model_name`."""
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )

    return config
def rename_key(name):
    """Map a key from the original checkpoint to its name in the HF implementation."""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name

    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original FocalNet checkpoint into the HF format and verify it."""
    # fmt: off
    model_name_to_url = {
"""focalnet-tiny""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth""",
"""focalnet-tiny-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth""",
"""focalnet-small""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth""",
"""focalnet-small-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth""",
"""focalnet-base""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth""",
"""focalnet-base-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth""",
"""focalnet-large-lrf-fl3""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth""",
"""focalnet-large-lrf-fl4""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth""",
"""focalnet-xlarge-lrf-fl3""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth""",
"""focalnet-xlarge-lrf-fl4""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth""",
}
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="focalnet-tiny",
type=str,
help="Name of the FocalNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub.",
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
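# Example invocation (illustrative; assumes this script is saved as
# convert_focalnet_to_hf_format.py and that the checkpoint URL is reachable):
#
#     python convert_focalnet_to_hf_format.py \
#         --model_name focalnet-tiny \
#         --pytorch_dump_folder_path ./focalnet-tiny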
| 577 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
| 197 | 0 |
from __future__ import annotations
graph = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class Graph:
    """A graph with a breadth-first-search shortest-path query from a fixed source."""

    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
| 648 |
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    r"""Wraps a TVLT image processor and a TVLT feature extractor into a single processor."""

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
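# A minimal usage sketch (hypothetical inputs `video_frames` and `waveform`; illustrative only):
#
#     from transformers import TvltFeatureExtractor, TvltImageProcessor
#
#     processor = TvltProcessor(TvltImageProcessor(), TvltFeatureExtractor())
#     batch = processor(images=video_frames, audio=waveform, sampling_rate=44100)
#     # `batch` merges the pixel and audio tensors into a single dict of model inputs.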
| 648 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""gpt2""": 10_24,
"""gpt2-medium""": 10_24,
"""gpt2-large""": 10_24,
"""gpt2-xl""": 10_24,
"""distilgpt2""": 10_24,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" GPT-2 tokenizer (backed by HuggingFace's `tokenizers` library),
    using byte-level Byte-Pair-Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]

        return input_ids
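# A small usage sketch (requires downloading the pretrained files; illustrative only):
#
#     tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
#     tokenizer(["Hello", "world"], is_split_into_words=True)
#     # Without add_prefix_space=True, the assertions above reject pretokenized inputs.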
| 100 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'YituTech/conv-bert-base': 5_12,
'YituTech/conv-bert-medium-small': 5_12,
'YituTech/conv-bert-small': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" ConvBERT tokenizer, backed by HuggingFace's `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 552 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class NllbMoeConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of an NLLB-MoE model."""

    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
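# A quick instantiation sketch (default values per the signature above; illustrative only):
#
#     config = NllbMoeConfig()
#     assert config.num_experts == 128 and config.router_dtype == "float32"
#     NllbMoeConfig(router_dtype="int8")  # raises ValueError per the check above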
| 703 |
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: take items in decreasing value/weight ratio until full."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
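    # Worked example (added for illustration): with values (60, 100, 120), weights
    # (10, 20, 30) and capacity 50, the greedy takes items 0 and 1 whole plus 2/3 of item 2.
    max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    print(max_value, fractions)  # 240.0 [1, 1, 0.6666666666666666]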
| 362 | 0 |
def palindromic_string(input_string: str) -> str:
    """Manacher's algorithm: return the longest palindromic substring of `input_string`."""
    max_length = 0

    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this string is ending after the previously explored end (that is r) ?
        # if yes the update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
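# Worked example (added for illustration): the longest palindromic substring of
# "abacab" is "bacab".
assert palindromic_string("abacab") == "bacab"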
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 327 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ ={"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ =["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ =["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
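# With this pattern, an import such as `from transformers.models.fnet import FNetModel`
# resolves lazily: the heavy `modeling_fnet` module is only imported when the attribute
# is first accessed through the `_LazyModule` proxy installed above (illustrative note).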
| 482 | 0 |
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes up to and including `num`."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]
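# Worked example (added for illustration): the primes up to 10.
assert prime_sieve_eratosthenes(10) == [2, 3, 5, 7]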
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 29 |
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Slowsort: a deliberately inefficient recursive sort, performed in place on
    `sequence[start:end + 1]`."""
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
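# A quick in-place demonstration (added for illustration, not part of the original module):
example = [5, 2, 9, 1]
slowsort(example)
assert example == [1, 2, 5, 9]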
if __name__ == "__main__":
from doctest import testmod
testmod()
| 29 | 1 |
'''simple docstring'''
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def a__ ( self ) -> Optional[int]:
A: Union[str, Any] = BeitModelTester(self )
A: str = ConfigTester(self , config_class=A , has_text_modality=A , hidden_size=37 )
def a__ ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""BEiT does not use inputs_embeds""" )
def a__ ( self ) -> List[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(reason="""BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def a__ ( self ) -> List[Any]:
pass
def a__ ( self ) -> List[str]:
A , A: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A: Optional[int] = model_class(A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A: Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A , nn.Linear ) )
def a__ ( self ) -> str:
A , A: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A: List[Any] = model_class(A )
A: Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A: Union[str, Any] = [*signature.parameters.keys()]
A: Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A )
def a__ ( self ) -> Tuple:
A: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def a__ ( self ) -> Dict:
A: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A )
def a__ ( self ) -> int:
A: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
def a__ ( self ) -> Optional[int]:
A: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A )
def a__ ( self ) -> Tuple:
if not self.model_tester.is_training:
return
A , A: List[str] = self.model_tester.prepare_config_and_inputs_for_common()
A: int = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(A ), BeitForMaskedImageModeling]:
continue
A: str = model_class(A )
model.to(A )
model.train()
A: List[Any] = self._prepare_for_class(A , A , return_labels=A )
A: List[Any] = model(**A ).loss
loss.backward()
def a__ ( self ) -> str:
A , A: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
A: str = False
A: Optional[int] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(A ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
A: Dict = model_class(A )
model.gradient_checkpointing_enable()
model.to(A )
model.train()
A: str = self._prepare_for_class(A , A , return_labels=A )
A: Optional[int] = model(**A ).loss
loss.backward()
def a__ ( self ) -> Tuple:
A , A: Dict = self.model_tester.prepare_config_and_inputs_for_common()
A: Dict = _config_zero_init(A )
for model_class in self.all_model_classes:
A: Optional[Any] = model_class(config=A )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def a__ ( self ) -> Any:
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A: List[str] = BeitModel.from_pretrained(A )
self.assertIsNotNone(A )
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
def a__ ( self ) -> List[Any]:
return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None
@slow
def a__ ( self ) -> Any:
A: List[Any] = BeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ).to(A )
A: int = self.default_image_processor
A: int = prepare_img()
A: int = image_processor(images=A , return_tensors="""pt""" ).pixel_values.to(A )
# prepare bool_masked_pos
A: int = torch.ones((1, 1_96) , dtype=torch.bool ).to(A )
# forward pass
with torch.no_grad():
A: Tuple = model(pixel_values=A , bool_masked_pos=A )
A: Any = outputs.logits
# verify the logits
A: Optional[int] = torch.Size((1, 1_96, 81_92) )
self.assertEqual(logits.shape , A )
A: Optional[Any] = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(A )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , A , atol=1e-2 ) )
@slow
def a__ ( self ) -> Union[str, Any]:
A: List[str] = BeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ).to(A )
A: Dict = self.default_image_processor
A: Union[str, Any] = prepare_img()
A: List[Any] = image_processor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
A: Dict = model(**A )
A: Optional[int] = outputs.logits
# verify the logits
A: List[str] = torch.Size((1, 10_00) )
self.assertEqual(logits.shape , A )
A: int = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(A )
self.assertTrue(torch.allclose(logits[0, :3] , A , atol=1e-4 ) )
A: Tuple = 2_81
self.assertEqual(logits.argmax(-1 ).item() , A )
@slow
def a__ ( self ) -> List[Any]:
A: List[str] = BeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ).to(
A )
A: List[Any] = self.default_image_processor
A: Optional[int] = prepare_img()
A: Tuple = image_processor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
A: List[Any] = model(**A )
A: Tuple = outputs.logits
# verify the logits
A: List[Any] = torch.Size((1, 2_18_41) )
self.assertEqual(logits.shape , A )
A: Optional[int] = torch.tensor([1.6881, -0.2787, 0.5901] ).to(A )
self.assertTrue(torch.allclose(logits[0, :3] , A , atol=1e-4 ) )
A: Tuple = 23_96
self.assertEqual(logits.argmax(-1 ).item() , A )
@slow
def a__ ( self ) -> List[str]:
A: Any = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
A: List[str] = model.to(A )
A: Union[str, Any] = BeitImageProcessor(do_resize=A , size=6_40 , do_center_crop=A )
A: List[Any] = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
A: Tuple = Image.open(ds[0]["""file"""] )
A: Optional[Any] = image_processor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
A: Tuple = model(**A )
A: Optional[Any] = outputs.logits
# verify the logits
A: str = torch.Size((1, 1_50, 1_60, 1_60) )
self.assertEqual(logits.shape , A )
A: Optional[int] = version.parse(PIL.__version__ ) < version.parse("""9.0.0""" )
if is_pillow_less_than_a:
A: str = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=A , )
else:
A: List[str] = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=A , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , A , atol=1e-4 ) )
@slow
def a__ ( self ) -> Any:
A: List[str] = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
A: Any = model.to(A )
A: Dict = BeitImageProcessor(do_resize=A , size=6_40 , do_center_crop=A )
A: str = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
A: List[Any] = Image.open(ds[0]["""file"""] )
A: str = image_processor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
A: List[str] = model(**A )
A: Optional[Any] = outputs.logits.detach().cpu()
A: Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=A , target_sizes=[(5_00, 3_00)] )
A: List[Any] = torch.Size((5_00, 3_00) )
self.assertEqual(segmentation[0].shape , A )
A: Optional[int] = image_processor.post_process_semantic_segmentation(outputs=A )
A: List[Any] = torch.Size((1_60, 1_60) )
self.assertEqual(segmentation[0].shape , A )
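# Standalone sketch (torch assumed available) of the semantic-segmentation
# post-processing exercised above: upsample per-class logits to the requested
# size, then take the per-pixel argmax to obtain a class-index map.
import torch
import torch.nn.functional as F

def sketch_post_process(logits: torch.Tensor, target_size: tuple) -> torch.Tensor:
    resized = F.interpolate(logits, size=target_size, mode="bilinear", align_corners=False)
    return resized.argmax(dim=1)  # (batch, height, width) of class indices

# e.g. sketch_post_process(torch.randn(1, 150, 160, 160), (500, 300)).shape == (1, 500, 300)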
| 135 |
'''simple docstring'''
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
RADIUS = 6_378_137
def haversine_distance(lat_1: float, lon_1: float, lat_2: float, lon_2: float):
    '''simple docstring'''
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat_1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat_2)))
    lambda_1 = radians(lon_1)
    lambda_2 = radians(lon_2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
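# Usage sketch: distance in metres between two coordinate pairs. The city
# coordinates below are approximate and the printed value is illustrative
# (San Francisco to New York is on the order of 4.1e6 m).
if __name__ == "__main__":
    SAN_FRANCISCO = (37.774856, -122.424227)
    NEW_YORK = (40.713019, -74.012647)
    print(haversine_distance(*SAN_FRANCISCO, *NEW_YORK))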
| 135 | 1 |
"""simple docstring"""
def binary_and(a: int, b: int):
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive')
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == '1' and char_b == '1'))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
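# A cross-check sketch using Python's native bitwise AND. The zero-padding
# mirrors the zfill in binary_and above so the two strings compare equal.
def binary_and_builtin(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive')
    width = max(len(bin(a)) - 2, len(bin(b)) - 2)
    return "0b" + format(a & b, "b").zfill(width)

# e.g. binary_and_builtin(25, 32) == binary_and(25, 32) == "0b000000"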
| 713 | """simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class _lowerCamelCase (__lowerCamelCase , unittest.TestCase ):
_snake_case = DebertaVaTokenizer
_snake_case = DebertaVaTokenizerFast
_snake_case = True
_snake_case = True
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
_lowercase : Dict = DebertaVaTokenizer(lowerCamelCase_ , unk_token='<unk>' )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : int ):
"""simple docstring"""
_lowercase : Optional[Any] = 'this is a test'
_lowercase : int = 'this is a test'
return input_text, output_text
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
_lowercase : Optional[int] = '<pad>'
_lowercase : List[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ )
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
_lowercase : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '[PAD]' )
self.assertEqual(len(lowerCamelCase_ ) , 3_0_0_0_1 )
def __UpperCAmelCase ( self : str ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0 )
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
_lowercase : Optional[int] = ' \tHeLLo!how \n Are yoU? '
_lowercase : Optional[int] = ['▁hello', '!', 'how', '▁are', '▁you', '?']
# fmt: on
_lowercase : Optional[int] = DebertaVaTokenizer(lowerCamelCase_ , do_lower_case=lowerCamelCase_ )
_lowercase : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : int = DebertaVaTokenizerFast(lowerCamelCase_ , do_lower_case=lowerCamelCase_ )
_lowercase : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
pass
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
pass
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
_lowercase : List[Any] = 'I was born in 92000, and this is falsé.'
_lowercase : int = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
_lowercase : Union[str, Any] = DebertaVaTokenizer(lowerCamelCase_ , split_by_punct=lowerCamelCase_ )
_lowercase : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : Optional[int] = DebertaVaTokenizerFast(lowerCamelCase_ , split_by_punct=lowerCamelCase_ )
_lowercase : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
_lowercase : List[Any] = 'I was born in 92000, and this is falsé.'
_lowercase : Dict = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
_lowercase : Union[str, Any] = DebertaVaTokenizer(lowerCamelCase_ , do_lower_case=lowerCamelCase_ , split_by_punct=lowerCamelCase_ )
_lowercase : int = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : Dict = DebertaVaTokenizerFast(lowerCamelCase_ , do_lower_case=lowerCamelCase_ , split_by_punct=lowerCamelCase_ )
_lowercase : List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_lowercase : Optional[Any] = 'I was born in 92000, and this is falsé.'
_lowercase : List[str] = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
_lowercase : Union[str, Any] = DebertaVaTokenizer(lowerCamelCase_ , do_lower_case=lowerCamelCase_ , split_by_punct=lowerCamelCase_ )
_lowercase : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : Union[str, Any] = DebertaVaTokenizerFast(lowerCamelCase_ , do_lower_case=lowerCamelCase_ , split_by_punct=lowerCamelCase_ )
_lowercase : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
_lowercase : Dict = 'I was born in 92000, and this is falsé.'
_lowercase : Union[str, Any] = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
_lowercase : Optional[int] = DebertaVaTokenizer(lowerCamelCase_ , do_lower_case=lowerCamelCase_ , split_by_punct=lowerCamelCase_ )
_lowercase : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : Tuple = DebertaVaTokenizerFast(lowerCamelCase_ , do_lower_case=lowerCamelCase_ , split_by_punct=lowerCamelCase_ )
_lowercase : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
_lowercase : Optional[Any] = ' \tHeLLo!how \n Are yoU? '
_lowercase : int = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
# fmt: on
_lowercase : Dict = DebertaVaTokenizer(lowerCamelCase_ , do_lower_case=lowerCamelCase_ , split_by_punct=lowerCamelCase_ )
_lowercase : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : str = DebertaVaTokenizerFast(lowerCamelCase_ , do_lower_case=lowerCamelCase_ , split_by_punct=lowerCamelCase_ )
_lowercase : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_lowercase : int = self.get_tokenizer()
_lowercase : str = self.get_rust_tokenizer()
_lowercase : Optional[int] = 'I was born in 92000, and this is falsé.'
_lowercase : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
_lowercase : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : List[str] = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
_lowercase : List[Any] = rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : List[Any] = self.get_rust_tokenizer()
_lowercase : Tuple = tokenizer.encode(lowerCamelCase_ )
_lowercase : int = rust_tokenizer.encode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
_lowercase : Union[str, Any] = 'This is a test'
_lowercase : Tuple = [1_3, 1, 4_3_9_8, 2_5, 2_1, 1_2_8_9]
_lowercase : Optional[Any] = ['▁', 'T', 'his', '▁is', '▁a', '▁test']
_lowercase : str = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test']
_lowercase : List[Any] = DebertaVaTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
_lowercase : Optional[Any] = DebertaVaTokenizerFast(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
_lowercase : Union[str, Any] = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : Optional[Any] = tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : str = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : int = rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : int = rust_tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : int = rust_tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
# fmt: off
_lowercase : str = 'I was born in 92000, and this is falsé.'
_lowercase : str = [1_3, 1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9]
_lowercase : str = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
_lowercase : Optional[int] = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
_lowercase : Tuple = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : List[str] = tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : Union[str, Any] = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : List[str] = rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : Any = rust_tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : str = rust_tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_lowercase : Optional[Any] = DebertaVaTokenizer(lowerCamelCase_ )
_lowercase : str = tokenizer.encode('sequence builders' )
_lowercase : str = tokenizer.encode('multi-sequence build' )
_lowercase : Tuple = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ )
_lowercase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , lowerCamelCase_ )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , lowerCamelCase_ , )
@slow
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_lowercase : Any = {'input_ids': [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name='microsoft/deberta-v2-xlarge' , revision='ad6e42c1532ddf3a15c39246b63f5559d558b670' , )
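# A minimal round-trip sketch (standalone, not part of the test class above):
# using the same SentencePiece fixture loaded at the top of this file, ids
# encoded without special tokens should convert back to exactly the tokens
# produced by tokenize().
def _round_trip_sketch():
    tokenizer = DebertaVaTokenizer(SCREAMING_SNAKE_CASE)
    text = 'sequence builders'
    ids = tokenizer.encode(text, add_special_tokens=False)
    assert tokenizer.convert_ids_to_tokens(ids) == tokenizer.tokenize(text)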
| 283 | 0 |
"""simple docstring"""
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class UpperCamelCase_ (__A ):
__magic_name__ = '''char'''
__magic_name__ = '''bpe'''
__magic_name__ = '''wp'''
lowerCamelCase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class UpperCamelCase_ (__A ):
__magic_name__ = ['''image_processor''', '''char_tokenizer''']
__magic_name__ = '''ViTImageProcessor'''
__magic_name__ = '''MgpstrTokenizer'''
def __init__( self : Optional[int] , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Any=None , **lowerCAmelCase_ : str ) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowerCAmelCase_ , )
UpperCAmelCase_ : Union[str, Any] = kwargs.pop("feature_extractor" )
UpperCAmelCase_ : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
UpperCAmelCase_ : List[str] = tokenizer
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained("gpt2" )
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained("bert-base-uncased" )
super().__init__(lowerCAmelCase_ , lowerCAmelCase_ )
def __call__( self : Union[str, Any] , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : List[Any]=None , **lowerCAmelCase_ : Tuple ) -> List[Any]:
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
UpperCAmelCase_ : Tuple = self.image_processor(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ )
if text is not None:
UpperCAmelCase_ : str = self.char_tokenizer(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ )
if text is None:
return inputs
elif images is None:
return encodings
else:
UpperCAmelCase_ : List[str] = encodings["input_ids"]
return inputs
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : Optional[Any] ) -> str:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = sequences
UpperCAmelCase_ : Tuple = char_preds.size(0 )
UpperCAmelCase_ , UpperCAmelCase_ : int = self._decode_helper(lowerCAmelCase_ , "char" )
UpperCAmelCase_ , UpperCAmelCase_ : str = self._decode_helper(lowerCAmelCase_ , "bpe" )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self._decode_helper(lowerCAmelCase_ , "wp" )
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : Optional[Any] = []
for i in range(lowerCAmelCase_ ):
UpperCAmelCase_ : Any = [char_scores[i], bpe_scores[i], wp_scores[i]]
UpperCAmelCase_ : Union[str, Any] = [char_strs[i], bpe_strs[i], wp_strs[i]]
UpperCAmelCase_ : Tuple = scores.index(max(lowerCAmelCase_ ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
UpperCAmelCase_ : Optional[Any] = {}
UpperCAmelCase_ : Optional[Any] = final_strs
UpperCAmelCase_ : Tuple = final_scores
UpperCAmelCase_ : Optional[int] = char_strs
UpperCAmelCase_ : int = bpe_strs
UpperCAmelCase_ : Any = wp_strs
return out
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] ) -> str:
if format == DecodeType.CHARACTER:
UpperCAmelCase_ : Dict = self.char_decode
UpperCAmelCase_ : Union[str, Any] = 1
UpperCAmelCase_ : Optional[Any] = "[s]"
elif format == DecodeType.BPE:
UpperCAmelCase_ : int = self.bpe_decode
UpperCAmelCase_ : Optional[Any] = 2
UpperCAmelCase_ : Any = "#"
elif format == DecodeType.WORDPIECE:
UpperCAmelCase_ : Union[str, Any] = self.wp_decode
UpperCAmelCase_ : Optional[int] = 102
UpperCAmelCase_ : Union[str, Any] = "[SEP]"
else:
raise ValueError(f"""Format {format} is not supported.""" )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = [], []
UpperCAmelCase_ : int = pred_logits.size(0 )
UpperCAmelCase_ : str = pred_logits.size(1 )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = pred_logits.topk(1 , dim=-1 , largest=lowerCAmelCase_ , sorted=lowerCAmelCase_ )
UpperCAmelCase_ : Dict = preds_index.view(-1 , lowerCAmelCase_ )[:, 1:]
UpperCAmelCase_ : Optional[Any] = decoder(lowerCAmelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = torch.nn.functional.softmax(lowerCAmelCase_ , dim=2 ).max(dim=2 )
UpperCAmelCase_ : int = preds_max_prob[:, 1:]
for index in range(lowerCAmelCase_ ):
UpperCAmelCase_ : Union[str, Any] = preds_str[index].find(lowerCAmelCase_ )
UpperCAmelCase_ : Dict = preds_str[index][:pred_eos]
UpperCAmelCase_ : Tuple = preds_index[index].cpu().tolist()
UpperCAmelCase_ : Optional[int] = pred_index.index(lowerCAmelCase_ ) if eos_token in pred_index else -1
UpperCAmelCase_ : str = preds_max_prob[index][: pred_eos_index + 1]
UpperCAmelCase_ : Dict = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(lowerCAmelCase_ )
conf_scores.append(lowerCAmelCase_ )
return dec_strs, conf_scores
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : Any ) -> List[str]:
UpperCAmelCase_ : List[Any] = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(lowerCAmelCase_ )]
return decode_strs
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : Any ) -> List[str]:
return self.bpe_tokenizer.batch_decode(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : Any ) -> List[Any]:
UpperCAmelCase_ : Dict = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(lowerCAmelCase_ )]
return decode_strs
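# Standalone sketch (not a method) of the best-of-three selection that
# batch_decode performs above: per sample, keep the decoded string whose
# branch (char / bpe / wp) produced the highest confidence score.
def pick_best(char_out, bpe_out, wp_out):
    # each argument is a (decoded_string, score) pair for one sample
    return max([char_out, bpe_out, wp_out], key=lambda pair: pair[1])

# e.g. pick_best(('hello', 0.91), ('hel#lo', 0.42), ('hell', 0.77)) -> ('hello', 0.91)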
| 95 |
def bead_sort(sequence: list):
    '''simple docstring'''
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("""Sequence must be list of non-negative integers""")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
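# Illustrative run (standalone): bead sort applies only to non-negative
# integers, and this implementation always makes O(n^2) comparisons.
if __name__ == "__main__":
    print(bead_sort([2, 0, 5, 3]))  # -> [0, 2, 3, 5]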
| 385 | 0 |
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __UpperCAmelCase :
"""simple docstring"""
_lowerCamelCase = None
@experimental
def a (lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return _map_with_joblib(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def a (lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
__a = num_proc if num_proc <= len(lowerCAmelCase__ ) else len(lowerCAmelCase__ )
__a = [] # We organize the splits ourselves (contiguous splits)
for index in range(lowerCAmelCase__ ):
__a = len(lowerCAmelCase__ ) // num_proc
__a = len(lowerCAmelCase__ ) % num_proc
__a = div * index + min(lowerCAmelCase__ , lowerCAmelCase__ )
__a = start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
if len(lowerCAmelCase__ ) != sum(len(i[1] ) for i in split_kwds ):
raise ValueError(
f'''Error dividing inputs iterable among processes. '''
f'''Total number of objects {len(lowerCAmelCase__ )}, '''
f'''length: {sum(len(i[1] ) for i in split_kwds )}''' )
logger.info(
f'''Spawning {num_proc} processes for {len(lowerCAmelCase__ )} objects in slices of {[len(i[1] ) for i in split_kwds]}''' )
__a , __a = None, None
if not disable_tqdm:
__a , __a = (RLock(),), tqdm.set_lock
with Pool(lowerCAmelCase__ , initargs=lowerCAmelCase__ , initializer=lowerCAmelCase__ ) as pool:
__a = pool.map(lowerCAmelCase__ , lowerCAmelCase__ )
logger.info(f'''Finished {num_proc} processes''' )
__a = [obj for proc_res in mapped for obj in proc_res]
logger.info(f'''Unpacked {len(lowerCAmelCase__ )} objects''' )
return mapped
def a (lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
# progress bars are not yet supported for _map_with_joblib, because tqdm cannot be applied
# accurately to joblib, and doing so would require monkey-patching joblib internal classes,
# which are subject to change
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=lowerCAmelCase__ ):
return joblib.Parallel()(
joblib.delayed(lowerCAmelCase__ )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def a (lowerCAmelCase__ ):
__a = backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
__a = None
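# Standalone sketch of the contiguous split arithmetic used above: the first
# `len(items) % num_proc` shards receive one extra element, and together the
# shards cover the iterable exactly once, in order.
def contiguous_shards(items: list, num_proc: int) -> list:
    shards = []
    for index in range(num_proc):
        div = len(items) // num_proc
        mod = len(items) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        shards.append(items[start:end])
    return shards

# e.g. contiguous_shards(list(range(10)), 3) -> [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]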
| 721 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class __UpperCAmelCase ( __A ):
"""simple docstring"""
_lowerCamelCase = 42
class __UpperCAmelCase ( __A , __A ):
"""simple docstring"""
@register_to_config
def __init__( self , __A = 3 , __A = 3 , __A = ("DownEncoderBlock2D",) , __A = ("UpDecoderBlock2D",) , __A = (64,) , __A = 1 , __A = "silu" , __A = 3 , __A = 32 , __A = 256 , __A = 32 , __A = None , __A = 0.18215 , __A = "group" , ):
super().__init__()
# pass init params to Encoder
__a = Encoder(
in_channels=__A , out_channels=__A , down_block_types=__A , block_out_channels=__A , layers_per_block=__A , act_fn=__A , norm_num_groups=__A , double_z=__A , )
__a = vq_embed_dim if vq_embed_dim is not None else latent_channels
__a = nn.Convad(__A , __A , 1 )
__a = VectorQuantizer(__A , __A , beta=0.25 , remap=__A , sane_index_shape=__A )
__a = nn.Convad(__A , __A , 1 )
# pass init params to Decoder
__a = Decoder(
in_channels=__A , out_channels=__A , up_block_types=__A , block_out_channels=__A , layers_per_block=__A , act_fn=__A , norm_num_groups=__A , norm_type=__A , )
@apply_forward_hook
def snake_case_ ( self , __A , __A = True ):
__a = self.encoder(__A )
__a = self.quant_conv(__A )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=__A )
@apply_forward_hook
def snake_case_ ( self , __A , __A = False , __A = True ):
# also go through quantization layer
if not force_not_quantize:
__a , __a , __a = self.quantize(__A )
else:
__a = h
__a = self.post_quant_conv(__A )
__a = self.decoder(__A , quant if self.config.norm_type == """spatial""" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__A )
def snake_case_ ( self , __A , __A = True ):
__a = sample
__a = self.encode(__A ).latents
__a = self.decode(__A ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=__A )
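# A minimal sketch of the quantization step the VectorQuantizer above performs:
# map each latent vector to its nearest codebook entry by L2 distance. Shapes
# and the straight-through gradient trick are simplified away here.
import torch

def nearest_codebook(latents: torch.Tensor, codebook: torch.Tensor) -> torch.Tensor:
    # latents: (n, d), codebook: (k, d) -> quantized latents of shape (n, d)
    distances = torch.cdist(latents, codebook)  # (n, k) pairwise L2 distances
    indices = distances.argmin(dim=1)           # index of nearest entry per latent
    return codebook[indices]

# usage: nearest_codebook(torch.randn(8, 4), torch.randn(256, 4)).shape == (8, 4)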
| 209 | 0 |
'''simple docstring'''
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
lowercase__ : List[str] = Mapping[str, np.ndarray]
lowercase__ : Dict = Mapping[str, Any] # Is a nested dict.
lowercase__ : Dict = 0.01
@dataclasses.dataclass(frozen=a__ )
class SCREAMING_SNAKE_CASE :
lowerCAmelCase = 42 # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
lowerCAmelCase = 42 # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
lowerCAmelCase = 42 # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
lowerCAmelCase = 42 # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
lowerCAmelCase = 42 # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
lowerCAmelCase = None
# Optional remark about the protein. Included as a comment in output PDB
# files
lowerCAmelCase = None
# Templates used to generate this protein (prediction-only)
lowerCAmelCase = None
# Chain corresponding to each parent
lowerCAmelCase = None
def _lowerCAmelCase ( __snake_case : str ) -> Protein:
__A : Optional[Any] = r'(\[[A-Z]+\]\n)'
__A : List[str] = [tag.strip() for tag in re.split(__snake_case , __snake_case ) if len(__snake_case ) > 0]
__A : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split('\n' ) for l in tags[1::2]] )
__A : List[str] = ["N", "CA", "C"]
__A : int = None
__A : Dict = None
__A : Tuple = None
for g in groups:
if "[PRIMARY]" == g[0]:
__A : Optional[int] = g[1][0].strip()
for i in range(len(__snake_case ) ):
if seq[i] not in residue_constants.restypes:
__A : Optional[int] = 'X' # FIXME: strings are immutable
__A : int = np.array(
[residue_constants.restype_order.get(__snake_case , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
__A : List[List[float]] = []
for axis in range(3 ):
tertiary.append(list(map(__snake_case , g[1][axis].split() ) ) )
__A : List[str] = np.array(__snake_case )
__A : Tuple = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(__snake_case ):
__A : Dict = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
__A : Dict = np.array(list(map({'-': 0, '+': 1}.get , g[1][0].strip() ) ) )
__A : Any = np.zeros(
(
len(__snake_case ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(__snake_case ):
__A : Tuple = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=__snake_case , atom_mask=__snake_case , aatype=__snake_case , residue_index=np.arange(len(__snake_case ) ) , b_factors=__snake_case , )
def _lowerCAmelCase ( __snake_case : Protein , __snake_case : int = 0 ) -> List[str]:
__A : List[str] = []
__A : Dict = prot.remark
if remark is not None:
pdb_headers.append(f'REMARK {remark}' )
__A : Optional[int] = prot.parents
__A : List[Any] = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
__A : Tuple = [p for i, p in zip(__snake_case , __snake_case ) if i == chain_id]
if parents is None or len(__snake_case ) == 0:
__A : List[str] = ['N/A']
pdb_headers.append(f'PARENT {" ".join(__snake_case )}' )
return pdb_headers
def _lowerCAmelCase ( __snake_case : Protein , __snake_case : str ) -> str:
__A : List[str] = []
__A : Union[str, Any] = pdb_str.split('\n' )
__A : Tuple = prot.remark
if remark is not None:
out_pdb_lines.append(f'REMARK {remark}' )
__A : List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
__A : List[Any] = []
if prot.parents_chain_index is not None:
__A : Dict[str, List[str]] = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(__snake_case ) , [] )
parent_dict[str(__snake_case )].append(__snake_case )
__A : Dict = max([int(__snake_case ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
__A : Any = parent_dict.get(str(__snake_case ) , ['N/A'] )
parents_per_chain.append(__snake_case )
else:
parents_per_chain.append(list(prot.parents ) )
else:
__A : Any = [['N/A']]
def make_parent_line(__snake_case : Sequence[str] ) -> str:
return f'PARENT {" ".join(__snake_case )}'
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
__A : Any = 0
for i, l in enumerate(__snake_case ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(__snake_case )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(__snake_case ):
__A : Union[str, Any] = parents_per_chain[chain_counter]
else:
__A : List[Any] = ['N/A']
out_pdb_lines.append(make_parent_line(__snake_case ) )
return "\n".join(__snake_case )
def _lowerCAmelCase ( __snake_case : Protein ) -> str:
__A : List[str] = residue_constants.restypes + ['X']
def res_atoa(__snake_case : int ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , 'UNK' )
__A : List[str] = residue_constants.atom_types
__A : List[str] = []
__A : Dict = prot.atom_mask
__A : Any = prot.aatype
__A : Tuple = prot.atom_positions
__A : Any = prot.residue_index.astype(np.intaa )
__A : List[Any] = prot.b_factors
__A : Any = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError('Invalid aatypes.' )
__A : str = get_pdb_headers(__snake_case )
if len(__snake_case ) > 0:
pdb_lines.extend(__snake_case )
__A : int = aatype.shape[0]
__A : Tuple = 1
__A : int = 0
__A : Union[str, Any] = string.ascii_uppercase
__A : Optional[Any] = None
# Add all atom sites.
for i in range(__snake_case ):
__A : Union[str, Any] = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(__snake_case , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
__A : Any = 'ATOM'
__A : Tuple = atom_name if len(__snake_case ) == 4 else f' {atom_name}'
__A : Tuple = ''
__A : Tuple = ''
__A : Any = 1.00
__A : Any = atom_name[0] # Protein supports only C, N, O, S, this works.
__A : int = ''
__A : Tuple = 'A'
if chain_index is not None:
__A : Tuple = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
__A : Any = (
f'{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}'
f'{res_name_a:>3} {chain_tag:>1}'
f'{residue_index[i]:>4}{insertion_code:>1} '
f'{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}'
f'{occupancy:>6.2f}{b_factor:>6.2f} '
f'{element:>2}{charge:>2}'
)
pdb_lines.append(__snake_case )
atom_index += 1
__A : str = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
__A : str = True
__A : List[Any] = chain_index[i + 1]
if should_terminate:
# Close the chain.
__A : Any = 'TER'
__A : List[Any] = (
f'{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}'
)
pdb_lines.append(__snake_case )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(__snake_case , __snake_case ) )
pdb_lines.append('END' )
pdb_lines.append('' )
return "\n".join(__snake_case )
def _lowerCAmelCase ( __snake_case : Protein ) -> np.ndarray:
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def _lowerCAmelCase ( __snake_case : FeatureDict , __snake_case : ModelOutput , __snake_case : Optional[np.ndarray] = None , __snake_case : Optional[np.ndarray] = None , __snake_case : Optional[str] = None , __snake_case : Optional[Sequence[str]] = None , __snake_case : Optional[Sequence[int]] = None , ) -> Protein:
return Protein(
aatype=features['aatype'] , atom_positions=result['final_atom_positions'] , atom_mask=result['final_atom_mask'] , residue_index=features['residue_index'] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['final_atom_mask'] ) , chain_index=__snake_case , remark=__snake_case , parents=__snake_case , parents_chain_index=__snake_case , ) | 8 |
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Tuple = logging.get_logger(__name__)
def _lowerCAmelCase ( __snake_case : str , __snake_case : str ) -> Union[str, Any]:
__A : int = RobertaPreLayerNormConfig.from_pretrained(
__snake_case , architectures=['RobertaPreLayerNormForMaskedLM'] )
# convert state_dict
__A : Tuple = torch.load(hf_hub_download(repo_id=__snake_case , filename='pytorch_model.bin' ) )
__A : str = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith('roberta.' ):
__A : Dict = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ):
continue
__A : str = tensor_value
__A : Union[str, Any] = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=__snake_case , config=__snake_case , state_dict=__snake_case )
model.save_pretrained(__snake_case )
# convert tokenizer
__A : List[Any] = AutoTokenizer.from_pretrained(__snake_case )
tokenizer.save_pretrained(__snake_case )
if __name__ == "__main__":
lowercase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowercase__ : Optional[Any] = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path) | 8 | 1 |
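# Example invocation (the repo id comes from the help text above; the script
# name and output path are illustrative):
#   python convert_roberta_prelayernorm_checkpoint_to_pytorch.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta_prelayernorm_converted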
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def A__ ( lowercase: Any, lowercase: Optional[Any], lowercase: str ) -> Union[str, Any]:
A : Any =('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value')
A : Dict =(
('layer.', 'layer_'),
('word_embeddings.weight', 'word_embeddings'),
('position_embeddings.weight', 'position_embeddings'),
('token_type_embeddings.weight', 'token_type_embeddings'),
('.', '/'),
('LayerNorm/weight', 'LayerNorm/gamma'),
('LayerNorm/bias', 'LayerNorm/beta'),
('weight', 'kernel'),
)
if not os.path.isdir(lowercase ):
os.makedirs(lowercase )
A : Union[str, Any] =model.state_dict()
def to_tf_var_name(lowercase: Dict ):
for patt, repl in iter(lowercase ):
A : str =name.replace(lowercase, lowercase )
return F'bert/{name}'
def create_tf_var(lowercase: str, lowercase: str, lowercase: Any ):
A : str =tf.dtypes.as_dtype(tensor.dtype )
A : int =tf.get_variable(dtype=lowercase, shape=tensor.shape, name=lowercase, initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(lowercase )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
A : Union[str, Any] =to_tf_var_name(lowercase )
A : List[Any] =state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
A : Union[str, Any] =torch_tensor.T
A : str =create_tf_var(tensor=lowercase, name=lowercase, session=lowercase )
tf.keras.backend.set_value(lowercase, lowercase )
A : Optional[Any] =session.run(lowercase )
print(F'Successfully created {tf_name}: {np.allclose(lowercase, lowercase )}' )
A : Dict =tf.train.Saver(tf.trainable_variables() )
saver.save(lowercase, os.path.join(lowercase, model_name.replace('-', '_' ) + '.ckpt' ) )
def A__ ( lowercase: Dict=None ) -> Tuple:
A : int =argparse.ArgumentParser()
parser.add_argument('--model_name', type=lowercase, required=lowercase, help='model name e.g. bert-base-uncased' )
parser.add_argument(
'--cache_dir', type=lowercase, default=lowercase, required=lowercase, help='Directory containing pytorch model' )
parser.add_argument('--pytorch_model_path', type=lowercase, required=lowercase, help='/path/to/<pytorch-model-name>.bin' )
parser.add_argument('--tf_cache_dir', type=lowercase, required=lowercase, help='Directory in which to save tensorflow model' )
A : Any =parser.parse_args(lowercase )
A : Union[str, Any] =BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path ), cache_dir=args.cache_dir, )
convert_pytorch_checkpoint_to_tf(model=lowercase, ckpt_dir=args.tf_cache_dir, model_name=args.model_name )
if __name__ == "__main__":
main()
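# Standalone sketch of why tensors_to_transpose exists above: PyTorch nn.Linear
# stores weights as (out_features, in_features) while TF dense kernels are
# (in_features, out_features), so dense/attention weights must be transposed.
# torch is already imported at the top of this script.
def _transpose_sketch():
    w_pt = torch.randn(768, 3072)  # PyTorch layout: (out, in)
    w_tf = w_pt.T.numpy()          # TF layout: (in, out)
    assert w_tf.shape == (3072, 768)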
| 701 | from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    """simple docstring"""
    def decorator(func):
        handle = getattr(func, 'handle_key', [])
        handle += [key]
        setattr(func, 'handle_key', handle)
        return func
    return decorator


def mark_multiple(*keys: List[str]):
    """simple docstring"""
    def decorator(func):
        handle = getattr(func, 'handle_key', [])
        handle += keys
        setattr(func, 'handle_key', handle)
        return func
    return decorator


class KeyHandler(type):
    '''simple docstring'''

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, 'key_handler'):
            setattr(new_cls, 'key_handler', {})
        setattr(new_cls, 'handle_input', KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, 'handle_key', [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
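# Usage sketch (hypothetical Menu class): decorate handlers with the key codes
# they respond to, then apply register() so the KeyHandler metaclass wires each
# key to its handler; Menu.handle_input() dispatches one key press per call.
#
# @register
# class Menu:
#     @mark(KEYMAP["up"])
#     def move_up(cls):
#         ...
#
#     @mark_multiple(KEYMAP["down"], KEYMAP["newline"])
#     def other(cls):
#         ...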
| 661 | 0 |
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception('Matrices are not 2x2')
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def matrix_subtraction(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception('Odd matrices are not supported!')
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])
def print_matrix(matrix: list) -> None:
    print('\n'.join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)
    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            'Unable to multiply these matrices, please check the dimensions.\n'
            f"""Matrix A: {matrix1}\n"""
            f"""Matrix B: {matrix2}"""
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)
    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]
    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)
    final_matrix = actual_strassen(new_matrix1, new_matrix2)
    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
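    # Optional cross-check against numpy (assumed installed). Fresh, non-square
    # literals are used because strassen() pads its arguments in place, and the
    # early return above short-circuits when both inputs are square. The
    # reference product is computed before the call so the padding cannot skew it.
    import numpy as np

    mat_a = [[1, 2], [3, 4], [5, 6]]   # 3x2
    mat_b = [[7, 8, 9], [10, 11, 12]]  # 2x3
    expected = (np.array(mat_a) @ np.array(mat_b)).tolist()
    assert strassen(mat_a, mat_b) == expected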
| 562 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowercase_ = logging.get_logger(__name__)
class _UpperCamelCase ( lowerCamelCase__ ):
'''simple docstring'''
_A = ["pixel_values"]
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : int = 3_2 , SCREAMING_SNAKE_CASE_ : Tuple=PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE_ : bool = True , **SCREAMING_SNAKE_CASE_ : Optional[int] , ):
_a = do_resize
_a = do_rescale
_a = size_divisor
_a = resample
super().__init__(**SCREAMING_SNAKE_CASE_ )
def _UpperCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[ChannelDimension] = None , **SCREAMING_SNAKE_CASE_ : Dict ):
_a , _a = get_image_size(SCREAMING_SNAKE_CASE_ )
# Rounds the height and width down to the closest multiple of size_divisor
_a = height // size_divisor * size_divisor
_a = width // size_divisor * size_divisor
_a = resize(SCREAMING_SNAKE_CASE_ , (new_h, new_w) , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
return image
def _UpperCAmelCase ( self : str , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : Optional[ChannelDimension] = None , **SCREAMING_SNAKE_CASE_ : Dict ):
return rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _UpperCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE_ : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[TensorType, str]] = None , SCREAMING_SNAKE_CASE_ : ChannelDimension = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ : List[str] , ):
_a = do_resize if do_resize is not None else self.do_resize
_a = do_rescale if do_rescale is not None else self.do_rescale
_a = size_divisor if size_divisor is not None else self.size_divisor
_a = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('size_divisor is required for resizing' )
_a = make_list_of_images(SCREAMING_SNAKE_CASE_ )
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError('Invalid image(s)' )
# All transformations expect numpy arrays.
_a = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for img in images]
if do_resize:
_a = [self.resize(SCREAMING_SNAKE_CASE_ , size_divisor=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
_a = [self.rescale(SCREAMING_SNAKE_CASE_ , scale=1 / 2_5_5 ) for image in images]
_a = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images]
_a = {'pixel_values': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
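# Standalone sketch of the size arithmetic in resize() above: each side is
# rounded down to the nearest multiple of size_divisor.
def round_down_to_multiple(height: int, width: int, size_divisor: int) -> tuple:
    return height // size_divisor * size_divisor, width // size_divisor * size_divisor

# e.g. round_down_to_multiple(517, 389, 32) -> (512, 384)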
| 562 | 1 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
__SCREAMING_SNAKE_CASE =os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
__SCREAMING_SNAKE_CASE ="""sshleifer/student_marian_en_ro_6_1"""
__SCREAMING_SNAKE_CASE ="""sshleifer/tiny-mbart"""
@require_torch
class __magic_name__ ( __UpperCAmelCase):
'''simple docstring'''
def _A ( self: List[Any] , _lowerCamelCase: List[Any]=False , _lowerCamelCase: List[Any]=None , _lowerCamelCase: int=True , _lowerCamelCase: Optional[int]=True , _lowerCamelCase: Any=True , _lowerCamelCase: Any=True , ):
SCREAMING_SNAKE_CASE_ = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=_lowerCamelCase , num_train_epochs=1 , distributed=_lowerCamelCase , extra_args_str=_lowerCamelCase , predict_with_generate=_lowerCamelCase , do_train=_lowerCamelCase , do_eval=_lowerCamelCase , do_predict=_lowerCamelCase , )
SCREAMING_SNAKE_CASE_ = TrainerState.load_from_json(os.path.join(_lowerCamelCase , '''trainer_state.json''' ) ).log_history
if not do_eval:
return
SCREAMING_SNAKE_CASE_ = [log for log in logs if '''eval_loss''' in log.keys()]
SCREAMING_SNAKE_CASE_ = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
SCREAMING_SNAKE_CASE_ = eval_metrics[-1]
assert isinstance(last_step_stats['''eval_bleu'''] , _lowerCamelCase )
assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def _A ( self: Optional[int] ):
self.run_seqaseq_quick()
@require_torch_multi_gpu
def _A ( self: Optional[Any] ):
self.run_seqaseq_quick(distributed=_lowerCamelCase )
@require_torch_multi_gpu
def _A ( self: List[Any] ):
self.run_seqaseq_quick(distributed=_lowerCamelCase )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def _A ( self: Optional[int] ):
self.run_seqaseq_quick(distributed=_lowerCamelCase , extra_args_str='''--sharded_ddp simple''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def _A ( self: Optional[Any] ):
self.run_seqaseq_quick(distributed=_lowerCamelCase , extra_args_str='''--sharded_ddp simple --fp16''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def _A ( self: Union[str, Any] ):
self.run_seqaseq_quick(distributed=_lowerCamelCase , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=_lowerCamelCase )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def _A ( self: Optional[Any] ):
self.run_seqaseq_quick(
distributed=_lowerCamelCase , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=_lowerCamelCase )
@require_apex
@require_torch_gpu
def _A ( self: List[Any] ):
# XXX: apex breaks the trainer if it's run twice, e.g. run_seq2seq.main() called from the
# same program, and it breaks other tests that run from the same pytest worker. Until this
# is sorted out, it must be run only in an external program, i.e. distributed=True in this
# test and only under one or more GPUs; if we want CPU coverage we will need a special test.
#
# Specifically, the problem was traced to self.optimizer.step(): if it's run a 2nd time via
# a 2nd main() call, it botches the subsequent eval.
#
self.run_seqaseq_quick(distributed=_lowerCamelCase , extra_args_str='''--fp16 --fp16_backend=apex''' )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=_lowerCamelCase , extra_args_str='''--fp16 --fp16_backend=apex''' )
    @parameterized.expand(["base", "low", "high", "mixed"])
    @require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
            # test with the default log_level - should be info and thus log info once
            "base": {"extra_args_str": "", "n_matches": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
        }

        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])
    @slow
    def test_run_seqaseq(self):
        output_dir = self.run_trainer(
            eval_steps=2,
            max_len=128,
            model_name=MARIAN_MODEL,  # "sshleifer/student_marian_en_ro_6_1", defined at module level (see the memory comments in the bnb test below)
            learning_rate=3e-4,
            num_train_epochs=10,
            distributed=False,
        )

        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)

        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
    @slow
    @require_bitsandbytes
    def test_run_seqaseq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, int, float]:
            extra_args = "--skip_memory_metrics 0"

            output_dir = self.run_trainer(
                max_len=128,
                model_name=MARIAN_MODEL,
                learning_rate=3e-4,
                num_train_epochs=1,
                optim=optim,
                distributed=True,  # force run in a new process
                extra_args_str=extra_args,
                do_eval=False,
                do_predict=False,
                n_gpus_to_use=1,  # to allow deterministic fixed memory usage
            )

            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)
            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)

        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb

        # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes and the diff in optim memory usage is derived as so:
        #
        # - normal 25*8=~200MB (8 bytes per param)
        # - bnb    25*2= ~50MB (2 bytes per param)
        #
        # Thus we should expect ~150MB total memory saved.
        #
        # Peak memory should be the same - the total should be different by about that same margin
        #
        # After leaving a small margin to accommodate for differences between gpus let's check
        # that we have at least 120MB in savings
        expected_savings = 120

        # uncomment the following if this test starts failing - requires py38 for a new print feature
        # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
        # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
        # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
        # print(f"{gpu_alloc_mem_diff=}MB")
        # print(f"{gpu_peak_mem_diff=}MB")
        # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
        # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")

        self.assertGreater(
            gpu_alloc_mem_diff,
            expected_savings,
            "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB",
        )

        self.assertGreater(
            gpu_total_mem_diff,
            expected_savings,
            "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB",
        )

        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
        )
    def run_trainer(
        self,
        max_len: int,
        model_name: str,
        num_train_epochs: int,
        learning_rate: float = 3e-3,
        optim: str = "adafactor",
        distributed: bool = False,
        extra_args_str: str = None,
        eval_steps: int = 0,
        predict_with_generate: bool = True,
        do_train: bool = True,
        do_eval: bool = True,
        do_predict: bool = True,
        n_gpus_to_use: int = None,
    ):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps)}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()

        args_eval = f"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps)}
        """.split()

        args_predict = """
            --do_predict
        """.split()

        args = []
        if do_train:
            args += args_train
        if do_eval:
            args += args_eval
        if do_predict:
            args += args_predict
        if predict_with_generate:
            args += "--predict_with_generate".split()

        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"--optim {optim}".split()

        if extra_args_str is not None:
            args += extra_args_str.split()

        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"""
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            """.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
                main()

        return output_dir
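# --- Editor's usage sketch (not part of the original test file) ---
# A minimal, self-contained illustration of the log-parsing pattern the tests
# above rely on: `trainer_state.json` stores a `log_history` list, and the eval
# entries are the dicts that contain an "eval_loss" key. The file path below is
# hypothetical.
import json


def read_eval_metrics(trainer_state_path: str) -> list:
    # Equivalent to filtering TrainerState.load_from_json(...).log_history.
    with open(trainer_state_path) as f:
        log_history = json.load(f)["log_history"]
    return [log for log in log_history if "eval_loss" in log]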
| 89 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
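# --- Editor's sketch (not from the original file) ---
# The from_dict test above encodes a backward-compat rule: older Donut configs
# stored `size` as a (width, height) tuple, while the current API uses a
# {"height": ..., "width": ...} dict. A minimal re-implementation of that
# normalization, for illustration only (the real one lives in the image
# processor's size handling):
def normalize_size(size) -> dict:
    if isinstance(size, (tuple, list)):
        width, height = size  # legacy order is (width, height)
        return {"height": height, "width": width}
    if isinstance(size, int):
        return {"height": size, "width": size}
    return dict(size)


if __name__ == "__main__":
    assert normalize_size((42, 84)) == {"height": 84, "width": 42}
    assert normalize_size(42) == {"height": 42, "width": 42}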
| 89 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
    "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
    "junnyu/roformer_chinese_char_small": (
        "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
    ),
    "junnyu/roformer_chinese_char_base": (
        "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
    ),
    "junnyu/roformer_small_discriminator": (
        "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
    ),
    "junnyu/roformer_small_generator": (
        "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50_000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
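# --- Editor's usage sketch (not part of the original module) ---
# Minimal demonstration of the two classes above; nothing is downloaded.
if __name__ == "__main__":
    config = RoFormerConfig(vocab_size=50_000, num_hidden_layers=6)
    onnx_config = RoFormerOnnxConfig(config)
    print(config.model_type)         # "roformer"
    print(dict(onnx_config.inputs))  # dynamic axes for input_ids / attention_mask / token_type_ids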
| 37 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
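# --- Editor's usage sketch (not part of the original module) ---
# End-to-end flow of the tool: calling it runs encode -> forward -> decode
# (PipelineTool.__call__ chains the three methods and lazily loads the BLIP
# checkpoint). The image path below is hypothetical.
if __name__ == "__main__":
    from PIL import Image

    tool = ImageCaptioningTool()
    caption = tool(Image.open("cat.png"))  # hypothetical input file
    print(caption)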
| 171 | 0 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, activation="relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input):
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetEmbeddings(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding


class ResNetShortCut(nn.Module):
    def __init__(self, in_channels, out_channels, stride=2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input):
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class ResNetBasicLayer(nn.Module):
    def __init__(self, in_channels, out_channels, stride=1, activation="relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetBottleNeckLayer(nn.Module):
    def __init__(self, in_channels, out_channels, stride=1, activation="relu", reduction=4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    def __init__(self, config: ResNetConfig, in_channels, out_channels, stride=2, depth=2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input):
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state, output_hidden_states=False, return_dict=True):
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )


class ResNetPreTrainedModel(PreTrainedModel):
    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values, output_hidden_states=None, return_dict=None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)


@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
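# --- Editor's usage sketch (not part of the original module) ---
# A quick smoke test of the bare model above with randomly initialized weights
# (no checkpoint download). With the default config, a 224x224 input yields the
# shape in the docstring constants near the top of the file.
if __name__ == "__main__":
    config = ResNetConfig()
    model = ResNetModel(config)
    pixel_values = torch.randn(1, config.num_channels, 224, 224)
    with torch.no_grad():
        outputs = model(pixel_values)
    print(outputs.last_hidden_state.shape)  # torch.Size([1, 2048, 7, 7])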
| 701 |
from typing import Any


def mode(input_list: list) -> list[Any]:
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
if __name__ == "__main__":
import doctest
doctest.testmod()
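# --- Editor's addendum (not in the original): the same result in O(n) using
# collections.Counter instead of the O(n^2) repeated-count approach above.
from collections import Counter


def mode_counter(input_list: list) -> list[Any]:
    if not input_list:
        return []
    counts = Counter(input_list)
    top = max(counts.values())
    # e.g. mode_counter([1, 2, 2, 3, 3]) -> [2, 3]
    return sorted(value for value, count in counts.items() if count == top)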
| 238 | 0 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a nested list of random floats with the given 2-D shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
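# --- Editor's usage sketch (not part of the original test file) ---
# `floats_list` returns ragged Python lists of random floats; this is how
# test_call below builds three mono "waveforms" of increasing length.
if __name__ == "__main__":
    demo_inputs = [floats_list((1, length))[0] for length in (800, 1000, 1200)]
    print([len(x) for x in demo_inputs])  # [800, 1000, 1200]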
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
| 25 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)

            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
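# --- Editor's sketch (not from the original file) ---
# The JIT test above reduces to this pattern: wrap a pure function in jax.jit
# and compare against the eager path. A self-contained analogue that needs no
# model weights:
if __name__ == "__main__":
    import jax
    import jax.numpy as jnp

    @jax.jit
    def normalize(pixel_values):
        return (pixel_values - pixel_values.mean()) / (pixel_values.std() + 1e-6)

    x = jnp.linspace(0.0, 1.0, 3 * 32 * 32).reshape(1, 3, 32, 32)
    with jax.disable_jit():
        eager = normalize(x)
    jitted = normalize(x)
    assert jnp.allclose(eager, jitted, atol=1e-6)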
| 77 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}
class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
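# --- Editor's usage sketch (not part of the original module) ---
# Tokenize/detokenize round trip. The vocab path is hypothetical and must point
# at a real SentencePiece model file.
if __name__ == "__main__":
    tokenizer = BertGenerationTokenizer(vocab_file="spiece.model")  # hypothetical path
    tokens = tokenizer.tokenize("Hello world")
    print(tokens)
    print(tokenizer.convert_tokens_to_string(tokens))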
| 158 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
snake_case__ : List[InputFeatures]
    def __init__(
        self,
        data_dir: str,
        tokenizer: PreTrainedTokenizer,
        task: str,
        max_seq_length: Optional[int] = None,
        overwrite_cache=False,
        evaluate: bool = False,
    ):
        processor = hans_processors[task]()
        cached_features_file = os.path.join(
            data_dir,
            "cached_{}_{}_{}_{}".format(
                "dev" if evaluate else "train",
                tokenizer.__class__.__name__,
                str(max_seq_length),
                task,
            ),
        )
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                logger.info(f"Loading features from cached file {cached_features_file}")
                self.features = torch.load(cached_features_file)
            else:
                logger.info(f"Creating features from dataset file at {data_dir}")
                examples = (
                    processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                )
                logger.info("Training examples: %s", len(examples))
                self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(self.features, cached_features_file)
def __len__( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
return len(self.features )
    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]
    def get_labels(self):
        return self.label_list
if is_tf_available():
import tensorflow as tf
class TFHansDataset:
    features: List[InputFeatures]
def __init__( self :List[str] , __magic_name__ :str , __magic_name__ :PreTrainedTokenizer , __magic_name__ :str , __magic_name__ :Optional[int] = 128 , __magic_name__ :str=False , __magic_name__ :bool = False , ) -> List[str]:
'''simple docstring'''
processor = hans_processors[task]()
label_list = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
self.label_list = label_list
a__ = processor.get_dev_examples(__magic_name__ ) if evaluate else processor.get_train_examples(__magic_name__ )
a__ = hans_convert_examples_to_features(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 10000 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(__magic_name__ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
a__ = tf.data.Dataset.from_generator(
__magic_name__ , (
{
'''example_id''': tf.intaa,
'''input_ids''': tf.intaa,
'''attention_mask''': tf.intaa,
'''token_type_ids''': tf.intaa,
},
tf.intaa,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
    def get_dataset(self):
        return self.dataset
def __len__( self :Dict ) -> int:
'''simple docstring'''
return len(self.features )
    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]
    def get_labels(self):
        return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
) -> List[InputFeatures]:
    """Convert HANS examples into model features."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10_000 == 0:
            logger.info("Writing example %d" % (ex_index))
        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
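# --- Illustration (not part of the original file) ---
# A hypothetical HANS evaluation row showing which tab-separated columns
# HansProcessor._create_examples reads: column 5 is the premise, column 6
# the hypothesis, column 0 the gold label, and column 7 the pair id (with a
# leading "ex" prefix stripped). All values below are made up.
row = ["non-entailment", "", "", "", "", "The doctor saw the lawyer.", "The lawyer saw the doctor.", "ex0", ""]
guid = "%s-%s" % ("dev", row[0])
text_a, text_b = row[5], row[6]
pair_id = row[7][2:] if row[7].startswith("ex") else row[7]
label = row[0]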
| 158 | 1 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __a :
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
return SwinvaConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def UpperCAmelCase__ ( self : Any ,lowerCamelCase : Any ,lowerCamelCase : Dict ,lowerCamelCase : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SwinvaModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
__SCREAMING_SNAKE_CASE = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__SCREAMING_SNAKE_CASE = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def UpperCAmelCase__ ( self : Optional[int] ,lowerCamelCase : str ,lowerCamelCase : Dict ,lowerCamelCase : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SwinvaForMaskedImageModeling(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = SwinvaForMaskedImageModeling(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase__ ( self : str ,lowerCamelCase : Optional[int] ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.type_sequence_label_size
__SCREAMING_SNAKE_CASE = SwinvaForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__SCREAMING_SNAKE_CASE = model(lowerCamelCase ,labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __a ( _snake_case, _snake_case, unittest.TestCase ):
__UpperCamelCase : str = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
__UpperCamelCase : Any = (
{'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
if is_torch_available()
else {}
)
__UpperCamelCase : Dict = False
__UpperCamelCase : Tuple = False
__UpperCamelCase : str = False
__UpperCamelCase : Any = False
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SwinvaModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self ,config_class=lowerCamelCase ,embed_dim=37 )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
@unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
pass
@unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
__SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase ,nn.Linear ) )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
__SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = True
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(lowerCamelCase ,lowerCamelCase ) )
__SCREAMING_SNAKE_CASE = outputs.attentions
__SCREAMING_SNAKE_CASE = len(self.model_tester.depths )
self.assertEqual(len(lowerCamelCase ) ,lowerCamelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = config.window_size**2
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(lowerCamelCase ,lowerCamelCase ) )
__SCREAMING_SNAKE_CASE = outputs.attentions
self.assertEqual(len(lowerCamelCase ) ,lowerCamelCase )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
__SCREAMING_SNAKE_CASE = len(lowerCamelCase )
# Check attention is always last and order is fine
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(lowerCamelCase ,lowerCamelCase ) )
if hasattr(self.model_tester ,"""num_hidden_states_types""" ):
__SCREAMING_SNAKE_CASE = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
__SCREAMING_SNAKE_CASE = 2
self.assertEqual(out_len + added_hidden_states ,len(lowerCamelCase ) )
__SCREAMING_SNAKE_CASE = outputs.attentions
self.assertEqual(len(lowerCamelCase ) ,lowerCamelCase )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : Any ,lowerCamelCase : int ,lowerCamelCase : Optional[Any] ,lowerCamelCase : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(lowerCamelCase ,lowerCamelCase ) )
__SCREAMING_SNAKE_CASE = outputs.hidden_states
__SCREAMING_SNAKE_CASE = getattr(
self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase ) ,lowerCamelCase )
# Swinv2 has a different seq_length
__SCREAMING_SNAKE_CASE = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__SCREAMING_SNAKE_CASE = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
__SCREAMING_SNAKE_CASE = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase ) ,lowerCamelCase )
batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
reshaped_hidden_states = (
    reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = True
self.check_hidden_states_output(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE = True
self.check_hidden_states_output(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = 3
__SCREAMING_SNAKE_CASE = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__SCREAMING_SNAKE_CASE = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__SCREAMING_SNAKE_CASE = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__SCREAMING_SNAKE_CASE = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = True
self.check_hidden_states_output(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE = True
self.check_hidden_states_output(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,(padded_height, padded_width) )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
@slow
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = SwinvaModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = _config_zero_init(lowerCamelCase )
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(config=lowerCamelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() ,[0.0, 1.0] ,msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
@require_vision
@require_torch
class __a ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to(
lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.default_image_processor
__SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__SCREAMING_SNAKE_CASE = image_processor(images=lowerCamelCase ,return_tensors="""pt""" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**lowerCamelCase )
# verify the logits
__SCREAMING_SNAKE_CASE = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = torch.tensor([-0.3_947, -0.4_306, 0.0_026] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase ,atol=1E-4 ) )
| 109 |
'''simple docstring'''

import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Return the 1-based line number of the `base,exponent` pair with the
    greatest value, compared via exponent * log10(base) (Project Euler 99)."""
    largest = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
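# --- Illustration (not part of the original file) ---
# Why comparing exponent * log10(base) works: log10 is monotonic, so ordering
# the logarithms orders the powers themselves without materialising huge
# integers. For example 3**7 = 2187 beats 2**11 = 2048, and the logs agree:
# 7 * log10(3) ~ 3.340  >  11 * log10(2) ~ 3.311  (log10 is imported above)
assert 7 * log10(3) > 11 * log10(2)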
| 109 | 1 |
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """A pangram uses every letter of the alphabet at least once."""
    frequency = set()
    # Remove all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - 97] = True
        elif char.isupper():
            flag[ord(char) - 65] = True
    return all(flag)


def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Time the three implementations against each other."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
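# --- Illustration (not part of the original file) ---
# All three implementations agree on a quick smoke test: the default
# sentence is a pangram, "My name is Unknown" is not.
assert is_pangram() and is_pangram_faster() and is_pangram_fastest()
assert not is_pangram("My name is Unknown")
assert not is_pangram_faster("My name is Unknown")
assert not is_pangram_fastest("My name is Unknown")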
| 673 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase :
def __init__( self : List[str] , snake_case__ : Dict , snake_case__ : Optional[Any]=1_3 , snake_case__ : Union[str, Any]=7 , snake_case__ : List[str]=True , snake_case__ : Any=True , snake_case__ : List[str]=True , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=9_9 , snake_case__ : str=3_2 , snake_case__ : Dict=5 , snake_case__ : str=4 , snake_case__ : int=3_7 , snake_case__ : Union[str, Any]="gelu" , snake_case__ : Any=0.1 , snake_case__ : Dict=0.1 , snake_case__ : Optional[Any]=5_1_2 , snake_case__ : List[Any]=1_6 , snake_case__ : str=2 , snake_case__ : int=0.02 , snake_case__ : List[str]=3 , snake_case__ : Dict=4 , snake_case__ : str=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
def UpperCamelCase ( self : List[str] , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ , token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : List[Any] , snake_case__ : str , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : int , snake_case__ : int , snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = NystromformerForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = NystromformerForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self : int , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_choices
SCREAMING_SNAKE_CASE = NystromformerForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
__UpperCamelCase =(
{
"feature-extraction": NystromformerModel,
"fill-mask": NystromformerForMaskedLM,
"question-answering": NystromformerForQuestionAnswering,
"text-classification": NystromformerForSequenceClassification,
"token-classification": NystromformerForTokenClassification,
"zero-shot": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case__ )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case__ )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case__ )
@slow
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = NystromformerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModel.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(snake_case__ )[0]
SCREAMING_SNAKE_CASE = torch.Size((1, 6, 7_6_8) )
self.assertEqual(output.shape , snake_case__ )
SCREAMING_SNAKE_CASE = torch.tensor(
[[[-0.4_532, -0.0_936, 0.5_137], [-0.2_676, 0.0_628, 0.6_186], [-0.3_629, -0.1_726, 0.4_716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1E-4 ) )
@slow
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'the [MASK] of Belgium is Brussels'
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = tokenizer(snake_case__ , return_tensors='pt' )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(encoding.input_ids ).logits
SCREAMING_SNAKE_CASE = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(snake_case__ ) , 'capital' )
| 673 | 1 |
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """Validate a Spanish national ID (DNI): 8 digits plus a checksum letter."""
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
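# --- Illustration (not part of the original file) ---
# The checksum letter is LOOKUP_LETTERS[number % 23]: 12345678 % 23 == 14 and
# LOOKUP_LETTERS[14] == "Z", so "12345678Z" is valid. Dashes are stripped
# before validation, so "12345678-Z" passes as well.
assert is_spain_national_id("12345678Z")
assert is_spain_national_id("12345678-Z")
assert not is_spain_national_id("12345678A")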
| 14 | class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """Three FIFO queues, one per priority level 0 (highest) to 2 (lowest)."""

    def __init__(self) -> None:
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """A single queue where the smallest element has the highest priority."""

    def __init__(self) -> None:
        self.queue = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)


def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
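# --- Illustration (not part of the original file) ---
# Dequeueing from an empty queue raises the custom UnderFlowError defined above:
try:
    ElementPriorityQueue().dequeue()
except UnderFlowError as err:
    print(err)  # The queue is empty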
| 415 | 0 |
'''simple docstring'''

from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    """Return every subset of `nums` whose elements sum to `max_sum`."""
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    """Depth-first search over the state-space tree, pruning branches whose
    partial sum already exceeds `max_sum` or can no longer reach it."""
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index]
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
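# --- Illustration (not part of the original file) ---
# With nums = [3, 34, 4, 12, 5, 2] and max_sum = 9 the depth-first search
# finds exactly two subsets, so the print above emits: [3, 4, 2] [4, 5]
assert result == [[3, 4, 2], [4, 5]]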
| 92 |
'''simple docstring'''

import doctest
from collections import deque

import numpy as np


class CircularConvolution:
    """Circular convolution of two discrete signals via a rotation matrix."""

    def __init__(self):
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self):
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]


if __name__ == "__main__":
    doctest.testmod()
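# --- Illustration (not part of the original file) ---
# For the built-in signals [2, 1, 2, -1] and [1, 2, 3, 4] the circular
# convolution is [10, 10, 6, 14]; e.g. the first entry is
# 2*1 + 1*4 + 2*3 + (-1)*2 = 10.
convolution = CircularConvolution()
print(convolution.circular_convolution())  # expected: [10.0, 10.0, 6.0, 14.0]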
| 92 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
A : List[str] = logging.get_logger(__name__)
A : List[Any] = {'vocab_file': 'spiece.model'}
A : Tuple = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
A : Any = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
A : Tuple = 0
A : str = 1
A : str = 2
A : Union[str, Any] = 3
A : Optional[Any] = 4
class UpperCamelCase( _a ):
snake_case_ : Union[str, Any] = VOCAB_FILES_NAMES
snake_case_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
snake_case_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ : Tuple = """left"""
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Any=False , SCREAMING_SNAKE_CASE : List[Any]=True , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : Tuple="<s>" , SCREAMING_SNAKE_CASE : Optional[int]="</s>" , SCREAMING_SNAKE_CASE : List[str]="<unk>" , SCREAMING_SNAKE_CASE : List[str]="<sep>" , SCREAMING_SNAKE_CASE : Optional[int]="<pad>" , SCREAMING_SNAKE_CASE : Union[str, Any]="<cls>" , SCREAMING_SNAKE_CASE : str="<mask>" , SCREAMING_SNAKE_CASE : int=["<eop>", "<eod>"] , SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE : Dict , ) -> None:
'''simple docstring'''
__snake_case = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token
__snake_case = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=SCREAMING_SNAKE_CASE , remove_space=SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , additional_special_tokens=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , )
__snake_case = 3
__snake_case = do_lower_case
__snake_case = remove_space
__snake_case = keep_accents
__snake_case = vocab_file
__snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(SCREAMING_SNAKE_CASE )
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
return len(self.sp_model )
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Tuple:
'''simple docstring'''
__snake_case = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Any ) -> str:
'''simple docstring'''
__snake_case = self.__dict__.copy()
__snake_case = None
return state
def __setstate__( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] ) -> int:
'''simple docstring'''
__snake_case = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__snake_case = {}
__snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] ) -> int:
'''simple docstring'''
if self.remove_space:
__snake_case = " ".join(inputs.strip().split() )
else:
__snake_case = inputs
__snake_case = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
__snake_case = unicodedata.normalize("NFKD" , SCREAMING_SNAKE_CASE )
__snake_case = "".join([c for c in outputs if not unicodedata.combining(SCREAMING_SNAKE_CASE )] )
if self.do_lower_case:
__snake_case = outputs.lower()
return outputs
def SCREAMING_SNAKE_CASE_ ( self : int , SCREAMING_SNAKE_CASE : str ) -> List[str]:
'''simple docstring'''
__snake_case = self.preprocess_text(SCREAMING_SNAKE_CASE )
__snake_case = self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE )
__snake_case = []
for piece in pieces:
if len(SCREAMING_SNAKE_CASE ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
__snake_case = self.sp_model.EncodeAsPieces(piece[:-1].replace(SCREAMING_SNAKE_CASE , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__snake_case = cur_pieces[1:]
else:
__snake_case = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(SCREAMING_SNAKE_CASE )
else:
new_pieces.append(SCREAMING_SNAKE_CASE )
return new_pieces
def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
return self.sp_model.PieceToId(SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE : int ) -> List[Any]:
'''simple docstring'''
__snake_case = "".join(SCREAMING_SNAKE_CASE ).replace(SCREAMING_SNAKE_CASE , " " ).strip()
return out_string
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : bool = None , SCREAMING_SNAKE_CASE : bool = True , **SCREAMING_SNAKE_CASE : Optional[int] , ) -> str:
'''simple docstring'''
__snake_case = kwargs.pop("use_source_tokenizer" , SCREAMING_SNAKE_CASE )
__snake_case = self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
__snake_case = []
__snake_case = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(SCREAMING_SNAKE_CASE ) )
__snake_case = []
sub_texts.append(SCREAMING_SNAKE_CASE )
else:
current_sub_text.append(SCREAMING_SNAKE_CASE )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(SCREAMING_SNAKE_CASE ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
__snake_case = "".join(SCREAMING_SNAKE_CASE )
__snake_case = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
__snake_case = self.clean_up_tokenization(SCREAMING_SNAKE_CASE )
return clean_text
else:
return text
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def SCREAMING_SNAKE_CASE_ ( self : int , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None , SCREAMING_SNAKE_CASE : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE )
if token_ids_a is not None:
return ([0] * len(SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1, 1]
return ([0] * len(SCREAMING_SNAKE_CASE )) + [1, 1]
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
__snake_case = [self.sep_token_id]
__snake_case = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__snake_case = os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE , "wb" ) as fi:
__snake_case = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
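# --- Illustration (not part of the original file) ---
# A sketch of the XLNet input layout produced by
# build_inputs_with_special_tokens above: each sequence ends with <sep> and
# the whole input ends with <cls>. The token ids below are made up.
token_ids_a, token_ids_b, sep, cls = [11, 12], [21, 22, 23], [4], [3]
single_input = token_ids_a + sep + cls                      # [11, 12, 4, 3]
pair_input = token_ids_a + sep + token_ids_b + sep + cls    # [11, 12, 4, 21, 22, 23, 4, 3]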
| 371 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.6, """eval_loss""": 0.9},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.3, """eval_loss""": 0.9},
},
] )
class UpperCamelCase( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Dict:
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="utf-8" , check=SCREAMING_SNAKE_CASE , )
assert hasattr(self , "env" )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Any=1 ) -> List[str]:
'''simple docstring'''
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'''{self.env.base_job_name}-single''' , instance_count=SCREAMING_SNAKE_CASE , instance_type=self.instance_type , debugger_hook_config=SCREAMING_SNAKE_CASE , hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="py36" , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , SCREAMING_SNAKE_CASE : Any ) -> List[str]:
'''simple docstring'''
TrainingJobAnalytics(SCREAMING_SNAKE_CASE ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
__snake_case = self.create_estimator()
# run training
estimator.fit()
# result dataframe
__snake_case = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__snake_case = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
__snake_case = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__snake_case = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'''{estimator.latest_training_job.name}.json''' , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , SCREAMING_SNAKE_CASE )
| 371 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class _a :
"""simple docstring"""
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ = None
if self.use_input_mask:
lowercase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ = None
lowercase_ = None
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ = ids_tensor([self.batch_size] , self.num_choices )
lowercase_ = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
(
(
    config,
    input_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
) = self.prepare_config_and_inputs()

config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        '''simple docstring'''
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip("""Protein models do not support embedding resizing.""" )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
pass
@unittest.skip("""Protein models do not support embedding resizing.""" )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
pass
    def test_model_common_attributes(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
    @slow
    def test_inference_masked_lm(self):
        '''simple docstring'''
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
[8.9_2_1_5_1_8, -1_0.5_8_9_8_1_4, -6.4_6_7_1_3_0_7],
[-6.3_9_6_7_1_5_6, -1_3.9_1_1_3_7_7, -1.1_2_1_1_9_1_5],
[-7.7_8_1_2_4_7, -1_3.9_5_1_5_5_7, -3.7_4_0_5_9_2],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
    @slow
    def test_inference_no_head(self):
        '''simple docstring'''
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
[0.1_4_4_4_3_0_9_2, 0.5_4_1_2_5_3_2_7, 0.3_2_4_7_7_3_9],
[0.3_0_3_4_0_4_8_4, 0.0_0_5_2_6_6_7_6, 0.3_1_0_7_7_7_2_2],
[0.3_2_2_7_8_0_4_3, -0.2_4_9_8_7_0_9_6, 0.3_4_1_4_6_2_8],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 603 | '''simple docstring'''
def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
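    # Added check (a minimal sketch, not part of the original module): the three
    # variants should agree on a known pangram and on a non-pangram.
    assert is_pangram() and is_pangram_faster() and is_pangram_fastest()
    assert not is_pangram("hello world")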
| 603 | 1 |
"""simple docstring"""
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
api = HfApi()

results = {}
# fmt: off
__A : List[Any] = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
__A : List[str] = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
__A : str = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
__A : Tuple = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
__A : str = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
__A : str = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
__A : str = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
__A : int = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
__A : List[str] = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
__A : Any = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
__A : List[str] = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
__A : List[str] = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
__A : int = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
__A : int = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
__A : int = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
__A : List[Any] = api.list_models(filter="diffusers")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
__A : Optional[Any] = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
print(f"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith("CompVis"):
            model = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet")
else:
            model = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
            logits = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
)
print(f"""{mod.modelId} has passed successfully!!!""")
| 656 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha):
    '''simple docstring'''
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_TEXT_ENCODER + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_UNET + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'--base_model_path', default=None, type=str, required=True, help='Path to the base model in diffusers format.'
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--lora_prefix_unet', default='lora_unet', type=str, help='The prefix of UNet weight in safetensors'
)
parser.add_argument(
'--lora_prefix_text_encoder',
default='lora_te',
type=str,
help='The prefix of text encoder weight in safetensors',
)
parser.add_argument('--alpha', default=0.75, type=float, help='The merging ratio in W = W0 + alpha * deltaW')
parser.add_argument(
'--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.'
)
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) | 322 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = "\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n    >>> from diffusers.utils import load_image\n    >>> import torch\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n    ...     \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n    ... )\n    >>> pipe_prior.to(\"cuda\")\n\n    >>> prompt = \"A red cartoon frog, 4k\"\n    >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n    >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n    ...     \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n    ... )\n    >>> pipe.to(\"cuda\")\n\n    >>> init_image = load_image(\n    ...     \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n    ...     \"/kandinsky/frog.png\"\n    ... )\n\n    >>> image = pipe(\n    ...     image=init_image,\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=100,\n    ...     strength=0.2,\n    ... ).images\n\n    >>> image[0].save(\"red_frog.png\")\n    ```\n"
def downscale_height_and_width(height, width, scale_factor=8):
    """simple docstring"""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
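# Added check (illustrative values only, not part of the original pipeline):
# the helper maps a pixel-space canvas to MoVQ latent dimensions, rounding up
# so the latents cover the whole image (64 pixels per latent at scale_factor=8).
assert downscale_height_and_width(512, 512) == (64, 64)
assert downscale_height_and_width(500, 500) == (64, 64)  # 500 is rounded up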
def prepare_image(pil_image, w=512, h=512):
    """simple docstring"""
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
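# Added sketch (hypothetical toy input): prepare_image yields a 1x3xHxW float
# tensor scaled to [-1, 1]; a pure white image maps to all ones.
_demo = prepare_image(Image.new("RGB", (16, 16), "white"), 16, 16)
assert _demo.shape == (1, 3, 16, 16) and float(_demo.max()) == 1.0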
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    '''simple docstring'''
    def __init__(self, unet: UNetaDConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image_embeds, image, negative_image_embeds, height: int = 512, width: int = 512, num_inference_steps: int = 100, guidance_scale: float = 4.0, strength: float = 0.3, num_images_per_prompt: int = 1, generator=None, output_type: str = "pil", return_dict: bool = True):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator,
            )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 625 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel(metaclass=DummyObject):
    '''simple docstring'''

    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
| 625 | 1 |
"""simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        '''simple docstring'''
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        '''simple docstring'''
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        '''simple docstring'''
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        '''simple docstring'''
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        '''simple docstring'''
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        '''simple docstring'''
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        '''simple docstring'''
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        '''simple docstring'''
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        '''simple docstring'''
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
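# Added usage sketch (assumed Ray setup; the model id and worker count are
# hypothetical): the distributed retriever expects one remote RayRetriever
# actor handle per retrieval worker.
def build_ray_retriever(num_workers=2):
    remote_cls = ray.remote(RayRetriever)
    workers = [remote_cls.remote() for _ in range(num_workers)]
    return RagRayDistributedRetriever.from_pretrained("facebook/rag-token-nq", workers)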
| 259 |
"""simple docstring"""
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
_lowercase : Optional[Any] = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
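# Added sketch: the same routine on a 3-vertex path graph, where the middle
# vertex is the only articulation point (expected printed output: 1).
compute_ap({0: [1], 1: [0, 2], 2: [1]})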
| 49 | 0 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
lowerCAmelCase__ = namedtuple("""covid_data""", """cases deaths recovered""")
def lowerCamelCase_ ( UpperCAmelCase_ : str = "https://www.worldometers.info/coronavirus/" ) -> covid_data:
'''simple docstring'''
_UpperCamelCase : List[str] = '//div[@class = "maincounter-number"]/span/text()'
return covid_data(*html.fromstring(requests.get(UpperCAmelCase_ ).content ).xpath(UpperCAmelCase_ ) )
lowerCAmelCase__ = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
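# Added sketch (not part of the original script): the scraper depends on the
# live worldometers markup, so a defensive wrapper keeps failures readable.
def covid_stats_safe(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    try:
        return covid_stats(url)
    except requests.RequestException as err:
        raise SystemExit(f"Could not fetch statistics: {err}") from err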
| 648 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
TEST_UNET_CONFIG = {
"""sample_size""": 3_2,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1_0_0_0,
"""block_out_channels""": [3_2, 6_4],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
IMAGENET_64_UNET_CONFIG = {
"""sample_size""": 6_4,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1_0_0_0,
"""block_out_channels""": [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4],
"""attention_head_dim""": 6_4,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
LSUN_256_UNET_CONFIG = {
"""sample_size""": 2_5_6,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4],
"""attention_head_dim""": 6_4,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
CD_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 4_0,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 2_0_1,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 1_5_1,
"""sigma_min""": 0.0_02,
"""sigma_max""": 80.0,
}
def strabool(v):
    '''simple docstring'''
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
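# Added check (a minimal sketch): strabool passes booleans through and
# normalizes the usual string spellings.
assert strabool(True) is True
assert strabool("yes") is True and strabool("0") is False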
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    '''simple docstring'''
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint
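# Added sketch with dummy tensors (hypothetical prefixes and shapes): shows the
# key remapping convert_resnet performs for a single resnet block.
_demo_old_keys = [
    "in_layers.0.weight", "in_layers.0.bias", "in_layers.2.weight", "in_layers.2.bias",
    "emb_layers.1.weight", "emb_layers.1.bias", "out_layers.0.weight", "out_layers.0.bias",
    "out_layers.3.weight", "out_layers.3.bias",
]
_demo_ckpt = {f"blk.{name}": torch.zeros(1) for name in _demo_old_keys}
_demo_new = convert_resnet(_demo_ckpt, {}, "blk", "down_blocks.0.resnets.0")
assert "down_blocks.0.resnets.0.conv1.weight" in _demo_new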
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim=None):
    '''simple docstring'''
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    '''simple docstring'''
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1

        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(f'Checkpoint: {ckpt_name}')

    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')

    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNetaDModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
| 648 | 1 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    lockfile = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / lockfile))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(lockfile)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / lockfile)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
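# Added test sketch (assumes the vendored filelock API's `is_locked` flag):
# once the first holder releases, the same path can be locked again.
def test_lock_can_be_reacquired(tmpdir):
    lock = FileLock(str(tmpdir / "bar.lock"))
    with lock.acquire():
        assert lock.is_locked
    assert not lock.is_locked
    with lock.acquire():
        assert lock.is_locked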
| 41 |
"""simple docstring"""
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'

notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
| 621 | 0 |
'''simple docstring'''
import math
import sys
import cv2
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)
def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]
def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)
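# Added check (illustrative): the spatial kernel peaks at its centre and decays
# radially; these are the weights folded into each window below.
_demo_kernel = get_gauss_kernel(5, 1.0)
assert _demo_kernel.shape == (5, 5)
assert _demo_kernel[2, 2] == _demo_kernel.max()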
def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2
def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
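# Added examples: parse_args falls back to defaults when argv is short, and
# bumps an even kernel size up to the next odd value so each window has a
# centre pixel.
assert parse_args(["prog"]) == ("../image_data/lena.jpg", 1.0, 1.0, 5)
assert parse_args(["prog", "img.jpg", "2.0", "0.5", "6"])[3] == 7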
if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
| 700 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""xlm-mlm-en-2048""": """https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json""",
"""xlm-mlm-ende-1024""": """https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-enfr-1024""": """https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json""",
"""xlm-mlm-enro-1024""": """https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json""",
"""xlm-mlm-tlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json""",
"""xlm-mlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json""",
"""xlm-clm-enfr-1024""": """https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json""",
"""xlm-clm-ende-1024""": """https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-17-1280""": """https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json""",
"""xlm-mlm-100-1280""": """https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json""",
}
class XLMConfig(PretrainedConfig):
    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }
    def __init__(self, vocab_size=30_145, emb_dim=2_048, n_layers=12, n_heads=16, dropout=0.1, attention_dropout=0.1, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=1, use_lang_emb=True, max_position_embeddings=512, embed_init_std=2_048**-0.5, layer_norm_eps=1E-12, init_std=0.02, bos_index=0, eos_index=1, pad_index=2, unk_index=3, mask_index=5, is_encoder=True, summary_type="first", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, start_n_top=5, end_n_top=5, mask_token_id=0, lang_id=0, pad_token_id=2, bos_token_id=0, **kwargs):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 233 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : List[Any] = logging.get_logger(__name__)
A : List[str] = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class lowerCAmelCase_ ( PretrainedConfig ):
__UpperCAmelCase = 'camembert'
    def __init__(
        self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True,
        classifier_dropout=None, **kwargs,
    ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class lowerCAmelCase_ ( OnnxConfig ):
@property
    def inputs(self):
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
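# Hedged sketch of the config round-trip that PretrainedConfig subclasses like the
# one above support; `hidden_size=768` is an illustrative value, not a claim about
# any particular checkpoint.
def _config_roundtrip_sketch():
    cfg = PretrainedConfig(hidden_size=768)
    restored = PretrainedConfig.from_dict(cfg.to_dict())
    assert restored.hidden_size == 768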
| 349 |
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
__UpperCAmelCase = DebertaTokenizer
__UpperCAmelCase = True
__UpperCAmelCase = DebertaTokenizerFast
    def setUp(self):
        '''simple docstring'''
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''[UNK]''',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''[UNK]'''}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')
        with open(self.merges_file, '''w''', encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(merges))

    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text

    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        text = '''lower newer'''
        bpe_tokens = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        tokd = tokenizer('''Hello''', '''World''')
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd['''token_type_ids'''], expected_token_type_ids)
@slow
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
snake_case : int =self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
snake_case : List[Any] =tokenizer.encode('''sequence builders''', add_special_tokens=_snake_case )
snake_case : str =tokenizer.encode('''multi-sequence build''', add_special_tokens=_snake_case )
snake_case : Union[str, Any] =tokenizer.encode(
'''sequence builders''', add_special_tokens=_snake_case, add_prefix_space=_snake_case )
snake_case : Optional[int] =tokenizer.encode(
'''sequence builders''', '''multi-sequence build''', add_special_tokens=_snake_case, add_prefix_space=_snake_case )
snake_case : str =tokenizer.build_inputs_with_special_tokens(_snake_case )
snake_case : Tuple =tokenizer.build_inputs_with_special_tokens(_snake_case, _snake_case )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def __snake_case ( self : Dict ):
'''simple docstring'''
snake_case : int =[self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
snake_case : Optional[int] =tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
snake_case : Optional[Any] =[
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
snake_case : int =tokenizer(_snake_case, padding=_snake_case )
snake_case : str =[tokenizer.decode(_snake_case, skip_special_tokens=_snake_case ) for seq in encoding['''input_ids''']]
# fmt: off
snake_case : Optional[Any] ={
'''input_ids''': [
[1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
snake_case : Tuple =[
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data, _snake_case )
for expected, decoded in zip(_snake_case, _snake_case ):
self.assertEqual(_snake_case, _snake_case )
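# Hedged sketch of how the merges file written in setUp drives BPE: apply each merge
# rule, in order, to a sequence of symbols. This is simplified — real tokenizers
# repeatedly pick the best-ranked pair instead of doing a single ordered pass.
def _apply_merges_sketch(symbols, merges):
    for a, b in merges:
        i = 0
        while i < len(symbols) - 1:
            if symbols[i] == a and symbols[i + 1] == b:
                symbols = symbols[:i] + [a + b] + symbols[i + 2 :]
            else:
                i += 1
    return symbols


# mirrors the expected tokens for "lower" in the test above
assert _apply_merges_sketch(list("lower"), [("e", "r")]) == ["l", "o", "w", "er"]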
| 349 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a : int = {
"""configuration_swinv2""": ["""SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Swinv2Config"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[int] = [
"""SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Swinv2ForImageClassification""",
"""Swinv2ForMaskedImageModeling""",
"""Swinv2Model""",
"""Swinv2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
a : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
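# Minimal sketch of the lazy-import idea behind _LazyModule above (simplified;
# the real class also patches sys.modules, supports submodules, and implements
# __dir__). The class and dict names below are illustrative.
def _tiny_lazy_module_sketch():
    import importlib
    import types

    class TinyLazyModule(types.ModuleType):
        def __init__(self, name, import_structure):
            super().__init__(name)
            # map each exported attribute to the module that defines it
            self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

        def __getattr__(self, attr):
            if attr not in self._attr_to_module:
                raise AttributeError(attr)
            # the defining module is only imported on first attribute access
            module = importlib.import_module(self._attr_to_module[attr])
            return getattr(module, attr)

    return TinyLazyModule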
| 672 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def euler_modified(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    # Heun's method ("modified Euler"): predict with an Euler step, then
    # correct with the trapezoidal average of the two slopes.
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y_get = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_get))
        )
        x += step_size

    return y
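# Hedged usage sketch: integrate y' = y from x = 0 to x = 1 with y(0) = 1;
# Heun's method with h = 0.01 should land near e ≈ 2.71828.
def _euler_modified_demo():
    ys = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    print(ys[-1])  # ~2.7182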
if __name__ == "__main__":
import doctest
doctest.testmod()
| 672 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
A_ : str = {
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[int] = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
A_ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
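# Generic sketch of the optional-dependency guard used above, reduced to its core
# (standard library only; the function name is a hypothetical stand-in):
def _is_torch_available_sketch():
    try:
        import torch  # noqa: F401
    except ImportError:
        return False
    return True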
| 303 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class A_ :
'''simple docstring'''
def __init__(self , lowercase__ , lowercase__=2 , lowercase__=True , lowercase__=False , lowercase__=10 , lowercase__=3 , lowercase__=32 * 8 , lowercase__=32 * 8 , lowercase__=4 , lowercase__=64 , ) -> Dict:
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = is_training
__UpperCAmelCase = use_auxiliary_loss
__UpperCAmelCase = num_queries
__UpperCAmelCase = num_channels
__UpperCAmelCase = min_size
__UpperCAmelCase = max_size
__UpperCAmelCase = num_labels
__UpperCAmelCase = hidden_dim
__UpperCAmelCase = hidden_dim
def lowerCAmelCase_ (self ) -> Dict:
__UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowercase__ )
__UpperCAmelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowercase__ )
__UpperCAmelCase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowercase__ ) > 0.5
).float()
__UpperCAmelCase = (torch.rand((self.batch_size, self.num_labels) , device=lowercase__ ) > 0.5).long()
__UpperCAmelCase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCAmelCase_ (self ) -> Dict:
__UpperCAmelCase = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
__UpperCAmelCase = self.num_queries
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = [1, 1, 1, 1]
__UpperCAmelCase = self.num_channels
__UpperCAmelCase = 64
__UpperCAmelCase = 128
__UpperCAmelCase = self.hidden_dim
__UpperCAmelCase = self.hidden_dim
__UpperCAmelCase = self.hidden_dim
return config
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self.prepare_config_and_inputs()
__UpperCAmelCase = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertEqual(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertEqual(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertEqual(len(transformer_decoder_hidden_states), config.decoder_layers)
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ , lowercase__=False ) -> Union[str, Any]:
with torch.no_grad():
__UpperCAmelCase = MaskaFormerModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__UpperCAmelCase = model(pixel_values=lowercase__ , pixel_mask=lowercase__ )
__UpperCAmelCase = model(lowercase__ , output_hidden_states=lowercase__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> List[str]:
__UpperCAmelCase = MaskaFormerForUniversalSegmentation(config=lowercase__ )
model.to(lowercase__ )
model.eval()
def comm_check_on_output(lowercase__ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__UpperCAmelCase = model(pixel_values=lowercase__ , pixel_mask=lowercase__ )
__UpperCAmelCase = model(lowercase__ )
comm_check_on_output(lowercase__ )
__UpperCAmelCase = model(
pixel_values=lowercase__ , pixel_mask=lowercase__ , mask_labels=lowercase__ , class_labels=lowercase__ )
comm_check_on_output(lowercase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
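# Worked numbers for the shape assertions above (hedged; the sizes are
# illustrative, not taken from any checkpoint): the pixel decoder compresses
# masks by a factor of 4 in each spatial dimension.
def _maskaformer_shape_sketch():
    batch_size, num_queries, num_labels, min_size, max_size = 2, 10, 4, 256, 256
    masks_shape = (batch_size, num_queries, min_size // 4, max_size // 4)  # (2, 10, 64, 64)
    class_shape = (batch_size, num_queries, num_labels + 1)  # (2, 10, 5); +1 is the null class
    return masks_shape, class_shape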
@require_torch
class A_ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
a__ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
a__ = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
a__ = False
a__ = False
a__ = False
a__ = False
def lowerCAmelCase_ (self ) -> Optional[Any]:
__UpperCAmelCase = MaskaFormerModelTester(self )
__UpperCAmelCase = ConfigTester(self , config_class=lowercase__ , has_text_modality=lowercase__ )
def lowerCAmelCase_ (self ) -> Any:
self.config_tester.run_common_tests()
def lowerCAmelCase_ (self ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowercase__ , **lowercase__ , output_hidden_states=lowercase__ )
def lowerCAmelCase_ (self ) -> Any:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowercase__ )
@unittest.skip(reason='''Mask2Former does not use inputs_embeds''' )
def lowerCAmelCase_ (self ) -> Tuple:
pass
@unittest.skip(reason='''Mask2Former does not have a get_input_embeddings method''' )
def lowerCAmelCase_ (self ) -> str:
pass
@unittest.skip(reason='''Mask2Former is not a generative model''' )
def lowerCAmelCase_ (self ) -> Optional[Any]:
pass
@unittest.skip(reason='''Mask2Former does not use token embeddings''' )
def lowerCAmelCase_ (self ) -> List[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def lowerCAmelCase_ (self ) -> Optional[int]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCAmelCase_ (self ) -> Optional[Any]:
pass
def lowerCAmelCase_ (self ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(lowercase__ )
__UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase = [*signature.parameters.keys()]
__UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase__ )
@slow
def lowerCAmelCase_ (self ) -> int:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
__UpperCAmelCase = MaskaFormerModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
def lowerCAmelCase_ (self ) -> List[str]:
__UpperCAmelCase = (self.model_tester.min_size,) * 2
__UpperCAmelCase = {
'''pixel_values''': torch.randn((2, 3, *size) , device=lowercase__ ),
'''mask_labels''': torch.randn((2, 10, *size) , device=lowercase__ ),
'''class_labels''': torch.zeros(2 , 10 , device=lowercase__ ).long(),
}
__UpperCAmelCase = self.model_tester.get_config()
__UpperCAmelCase = MaskaFormerForUniversalSegmentation(lowercase__ ).to(lowercase__ )
__UpperCAmelCase = model(**lowercase__ )
self.assertTrue(outputs.loss is not None )
def lowerCAmelCase_ (self ) -> Any:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowercase__ , **lowercase__ , output_hidden_states=lowercase__ )
def lowerCAmelCase_ (self ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(lowercase__ ).to(lowercase__ )
__UpperCAmelCase = model(**lowercase__ , output_attentions=lowercase__ )
self.assertTrue(outputs.attentions is not None )
def lowerCAmelCase_ (self ) -> str:
if not self.model_tester.is_training:
return
__UpperCAmelCase = self.all_model_classes[1]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
__UpperCAmelCase = model_class(lowercase__ )
model.to(lowercase__ )
model.train()
__UpperCAmelCase = model(lowercase__ , mask_labels=lowercase__ , class_labels=lowercase__ ).loss
loss.backward()
def lowerCAmelCase_ (self ) -> List[Any]:
__UpperCAmelCase = self.all_model_classes[1]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
__UpperCAmelCase = True
__UpperCAmelCase = True
__UpperCAmelCase = model_class(lowercase__ ).to(lowercase__ )
model.train()
__UpperCAmelCase = model(lowercase__ , mask_labels=lowercase__ , class_labels=lowercase__ )
__UpperCAmelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__UpperCAmelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
__UpperCAmelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__UpperCAmelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowercase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
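# Hedged sketch (plain PyTorch, independent of the model classes above) of the
# retain_grad pattern the previous test relies on: gradients of non-leaf tensors
# are discarded unless retain_grad() is called before backward().
def _retain_grad_sketch():
    import torch

    x = torch.ones(2, 2, requires_grad=True)
    hidden = x * 3            # non-leaf tensor; .grad would normally stay None
    hidden.retain_grad()
    hidden.sum().backward()
    assert hidden.grad is not None  # retained thanks to retain_grad()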
A_ : List[Any] = 1e-4
def __a ( ) -> str:
'''simple docstring'''
__UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class A_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ (self ) -> Union[str, Any]:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowerCAmelCase_ (self ) -> List[str]:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowerCAmelCase_ (self ) -> Optional[Any]:
__UpperCAmelCase = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowercase__ )
__UpperCAmelCase = self.default_image_processor
__UpperCAmelCase = prepare_img()
__UpperCAmelCase = image_processor(lowercase__ , return_tensors='''pt''' ).to(lowercase__ )
__UpperCAmelCase = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowercase__ , (1, 3, 384, 384) )
with torch.no_grad():
__UpperCAmelCase = model(**lowercase__ )
__UpperCAmelCase = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(lowercase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowercase__ , atol=lowercase__ ) )
__UpperCAmelCase = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(lowercase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowercase__ , atol=lowercase__ ) )
__UpperCAmelCase = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(lowercase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowercase__ , atol=lowercase__ ) )
def lowerCAmelCase_ (self ) -> List[str]:
__UpperCAmelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowercase__ ).eval()
__UpperCAmelCase = self.default_image_processor
__UpperCAmelCase = prepare_img()
__UpperCAmelCase = image_processor(lowercase__ , return_tensors='''pt''' ).to(lowercase__ )
__UpperCAmelCase = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowercase__ , (1, 3, 384, 384) )
with torch.no_grad():
__UpperCAmelCase = model(**lowercase__ )
# masks_queries_logits
__UpperCAmelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
__UpperCAmelCase = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
__UpperCAmelCase = torch.tensor(lowercase__ ).to(lowercase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowercase__ , atol=lowercase__ ) )
# class_queries_logits
__UpperCAmelCase = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
__UpperCAmelCase = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(lowercase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowercase__ , atol=lowercase__ ) )
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowercase__ ).eval()
__UpperCAmelCase = self.default_image_processor
__UpperCAmelCase = image_processor(
[np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='''pt''' , )
__UpperCAmelCase = inputs['''pixel_values'''].to(lowercase__ )
__UpperCAmelCase = [el.to(lowercase__ ) for el in inputs['''mask_labels''']]
__UpperCAmelCase = [el.to(lowercase__ ) for el in inputs['''class_labels''']]
with torch.no_grad():
__UpperCAmelCase = model(**lowercase__ )
self.assertTrue(outputs.loss is not None )
| 303 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class _snake_case ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = BlenderbotSmallTokenizer
_UpperCamelCase : Dict = False
    def setUp(self):
        super().setUp()

        vocab = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
        self.special_tokens_map = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')
        with open(self.merges_file, '''w''', encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = '''adapt act apte'''
        output_text = '''adapt act apte'''
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = '''adapt act apte'''
        bpe_tokens = ['''adapt''', '''act''', '''ap@@''', '''te''']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''')
        assert tok('''sam''').input_ids == [1384]

        src_text = '''I am a small frog.'''
        encoded = tok([src_text], padding=False, truncation=True)['''input_ids''']
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''')
        src_text = '''I am a small frog .'''
        src_text_dot = '''.'''
        encoded = tok(src_text)['''input_ids''']
        encoded_dot = tok(src_text_dot)['''input_ids''']
        assert encoded[-1] == encoded_dot[0]
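# Hedged sketch of the "@@" continuation-marker convention in the vocabulary above:
# detokenization strips the marker and only inserts spaces after final subwords
# (simplified; the real tokenizer does more normalization).
def _detok_sketch(tokens):
    return "".join(t[:-2] if t.endswith("@@") else t + " " for t in tokens).strip()


assert _detok_sketch(["adapt", "act", "ap@@", "te"]) == "adapt act apte"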
| 709 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
__lowercase = logging.get_logger(__name__)
class _snake_case ( ChineseCLIPImageProcessor ):
    """simple docstring"""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            '''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use ChineseCLIPImageProcessor instead.''', FutureWarning, )
        super().__init__(*args, **kwargs)
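# Generic sketch of the deprecation-shim pattern above; the class names here are
# hypothetical stand-ins, not transformers classes.
class _NewProcessorSketch:
    pass


class _OldExtractorSketch(_NewProcessorSketch):
    def __init__(self, *args, **kwargs):
        warnings.warn("_OldExtractorSketch is deprecated; use _NewProcessorSketch instead.", FutureWarning)
        super().__init__(*args, **kwargs)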
| 305 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
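# Hedged usage sketch for read_txt_into_dict: a labels file with one label per
# line yields an id2label-style mapping keyed by line number. The file name is
# illustrative.
def _read_txt_into_dict_sketch(tmp_path="labels.txt"):
    with open(tmp_path, "w") as f:
        f.write("happy\nsad\n")
    assert read_txt_into_dict(tmp_path) == {0: "happy", 1: "sad"}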
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}" )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
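# Worked sketch of the "*" substitution above: a fairseq parameter name such as
# "encoder.layers.3.fc1.weight" carries its layer index two dot-segments before
# the matched key, and that index then replaces "*" in the mapped HF key.
def _wildcard_mapping_sketch():
    name = "encoder.layers.3.fc1.weight"
    key = "fc1"
    layer_index = name.split(key)[0].split(".")[-2]  # "3"
    mapped = "encoder.layers.*.feed_forward.intermediate_dense".replace("*", layer_index)
    assert mapped == "encoder.layers.3.feed_forward.intermediate_dense"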
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def __a ( A__ : Optional[int] , A__ : Union[str, Any] , A__ : Tuple=None , A__ : Any=None , A__ : Optional[Any]=True , A__ : List[str]=False ):
if config_path is not None:
SCREAMING_SNAKE_CASE = WavaVecaConfig.from_pretrained(A__ )
else:
SCREAMING_SNAKE_CASE = WavaVecaConfig()
if is_seq_class:
SCREAMING_SNAKE_CASE = read_txt_into_dict(A__ )
SCREAMING_SNAKE_CASE = idalabel
SCREAMING_SNAKE_CASE = WavaVecaForSequenceClassification(A__ )
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=A__ , return_attention_mask=A__ , )
feature_extractor.save_pretrained(A__ )
elif is_finetuned:
if dict_path:
SCREAMING_SNAKE_CASE = Dictionary.load(A__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
SCREAMING_SNAKE_CASE = target_dict.pad_index
SCREAMING_SNAKE_CASE = target_dict.bos_index
SCREAMING_SNAKE_CASE = target_dict.eos_index
SCREAMING_SNAKE_CASE = len(target_dict.symbols )
SCREAMING_SNAKE_CASE = os.path.join(A__ , "vocab.json" )
if not os.path.isdir(A__ ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(A__ ) )
return
os.makedirs(A__ , exist_ok=A__ )
SCREAMING_SNAKE_CASE = target_dict.indices
# fairseq has the <pad> and <s> switched
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 1
with open(A__ , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(A__ , A__ )
SCREAMING_SNAKE_CASE = WavaVecaCTCTokenizer(
A__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=A__ , )
SCREAMING_SNAKE_CASE = True if config.feat_extract_norm == "layer" else False
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=A__ , return_attention_mask=A__ , )
SCREAMING_SNAKE_CASE = WavaVecaProcessor(feature_extractor=A__ , tokenizer=A__ )
processor.save_pretrained(A__ )
SCREAMING_SNAKE_CASE = WavaVecaForCTC(A__ )
else:
SCREAMING_SNAKE_CASE = WavaVecaForPreTraining(A__ )
if is_finetuned or is_seq_class:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
SCREAMING_SNAKE_CASE = argparse.Namespace(task="audio_pretraining" )
SCREAMING_SNAKE_CASE = fairseq.tasks.setup_task(A__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=A__ )
SCREAMING_SNAKE_CASE = model[0].eval()
recursively_load_weights(A__ , A__ , not is_finetuned )
hf_wavavec.save_pretrained(A__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
) | 16 |
from __future__ import annotations
def peak(lst: list[int]) -> int:
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
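# Hedged usage sketch for peak(): the input is assumed to increase and then
# decrease (unimodal), which the divide-and-conquer above relies on.
def _peak_demo():
    assert peak([1, 2, 3, 4, 1]) == 4
    assert peak([1, 10, 9, 8, 7]) == 10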
if __name__ == "__main__":
import doctest
doctest.testmod()
| 548 | 0 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 711 |
"""simple docstring"""
def power(base: int, exponent: int) -> float:
    '''simple docstring'''
    return base * power(base, exponent - 1) if exponent else 1
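# Alternative sketch: iterative exponentiation by squaring uses O(log n)
# multiplications instead of the O(n) recursion above (non-negative exponents only).
def power_fast(base: int, exponent: int) -> int:
    result = 1
    while exponent:
        if exponent & 1:
            result *= base
        base *= base
        exponent >>= 1
    return result


assert power_fast(3, 5) == 243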
if __name__ == "__main__":
print('Raise base to the power of exponent using recursion...')
_UpperCamelCase : Any = int(input('Enter the base: ').strip())
_UpperCamelCase : str = int(input('Enter the exponent: ').strip())
_UpperCamelCase : int = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
_UpperCamelCase : Union[str, Any] = 1 / result
print(f'''{base} to the power of {exponent} is {result}''')
| 118 | 0 |
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = '''scheduler_config.json'''
class KarrasDiffusionSchedulers(Enum):
    """simple docstring"""

    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    EulerAncestralDiscreteScheduler = 6
    DPMSolverMultistepScheduler = 7
    HeunDiscreteScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14
@dataclass
class SchedulerOutput(BaseOutput):
    """simple docstring"""

    prev_sample: torch.FloatTensor
class SchedulerMixin:
    """simple docstring"""

    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs):
        '''simple docstring'''
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path, subfolder=subfolder, return_unused_kwargs=True, return_commit_hash=True, **kwargs, )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        '''simple docstring'''
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        '''simple docstring'''
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        '''simple docstring'''
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
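# Minimal sketch of the name-to-class resolution in _get_compatibles above:
# resolve strings via getattr on a module, skipping names it doesn't export.
# The module and names below are illustrative.
def _get_compatibles_sketch():
    module = importlib.import_module("collections")
    names = ["OrderedDict", "Counter", "DoesNotExist"]
    classes = [getattr(module, n) for n in names if hasattr(module, n)]
    assert [c.__name__ for c in classes] == ["OrderedDict", "Counter"]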
| 659 |
class RadixNode:
    """simple docstring"""

    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of each edge label to the child node
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        '''simple docstring'''
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        '''simple docstring'''
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        '''simple docstring'''
        # Case 1: The word equals this node's prefix
        # Solution: We mark the current node as a leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        '''simple docstring'''
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        '''simple docstring'''
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True

    def print_tree(self, height: int = 0) -> None:
        '''simple docstring'''
        if self.prefix != "":
            print("-" * height, self.prefix, "  (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
| 659 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class lowercase ( unittest.TestCase ):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['''bs'''], model_result['''ss''']):
                result = model_result['''result'''][batch_size][sequence_length]
                self.assertIsNotNone(result)
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : Any = '''sshleifer/tiny-gpt2'''
SCREAMING_SNAKE_CASE__ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_lowercase , multi_process=_lowercase , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = TensorFlowBenchmark(_lowercase )
SCREAMING_SNAKE_CASE__ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''sgugger/tiny-distilbert-classification'''
SCREAMING_SNAKE_CASE__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , only_pretrain_model=_lowercase , )
SCREAMING_SNAKE_CASE__ : int = TensorFlowBenchmark(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], use_xla=True, multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                eager_mode=True,
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 713 |
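The sample above exercises the TensorFlow benchmarking utilities shipped with `transformers`. As a reference point, a minimal standalone sketch of the same API follows; it assumes a TensorFlow-enabled `transformers` install, the model ID is only an example, and note that these benchmark utilities are deprecated in recent `transformers` releases:

# Minimal sketch of the TensorFlowBenchmark API used in the tests above.
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],  # any hub model ID works here
    inference=True,
    training=False,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
benchmark = TensorFlowBenchmark(args)
results = benchmark.run()
print(results.time_inference_result)    # inference latency per model/batch size/sequence length
print(results.memory_inference_result)  # peak memory per model/batch size/sequence length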
import mpmath # for roots of unity
import numpy as np
class FFT:
    """Fast polynomial multiplication using a radix-2 fast Fourier transform."""

    def __init__(self, poly_a=None, poly_b=None):
        # Input as coefficient lists, lowest degree first
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove trailing (highest-degree) zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root of unity used for the Fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        # Iterative butterfly passes, halving the number of columns each time
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol]) / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol]) / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2

        # Unpack: keep one (rounded) complex coefficient per row
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Remove trailing zero coefficients
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    def __str__(self):
        a = "A = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"


# Unit tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 250 | 0 |
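As a quick sanity check of the multiplier above: (1 + 2x)(3 + 4x) = 3 + 10x + 8x^2, so the product should come back with coefficients 3, 10, 8. A minimal usage sketch (coefficients are returned as rounded complex numbers):

# Sanity check: (1 + 2x) * (3 + 4x) = 3 + 10x + 8x^2
fft = FFT([1, 2], [3, 4])
print(fft.product)  # expected: [(3+0j), (10+0j), (8+0j)]
print(fft)          # pretty-prints A, B and A*B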
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    r"""
    Constructs a ViLT processor which wraps a ViLT image processor and a BERT tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 210 |
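A minimal usage sketch of the processor above; the checkpoint name and image URL are illustrative assumptions, not part of the original sample:

# Hypothetical usage: ViltProcessor bundles image + text preprocessing in one call.
import requests
from PIL import Image
from transformers import ViltProcessor

processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")  # example checkpoint
url = "http://images.cocodataset.org/val2017/000000039769.jpg"  # example image
image = Image.open(requests.get(url, stream=True).raw)

encoding = processor(image, "How many cats are there?", return_tensors="pt")
print(encoding.keys())  # tokenizer outputs plus pixel_values / pixel_mask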
'''simple docstring'''
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target vector
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        # predicting the probability from the fitted logistic regression model
        return sigmoid_function(np.dot(x, theta))

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
| 210 | 1 |
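For reference, the training loop in `logistic_reg` above is plain batch gradient descent on the binary cross-entropy cost. With $m$ equal to `y.size` and $h = \sigma(X\theta)$ computed by `sigmoid_function`, the `gradient` line implements

$$\nabla_{\theta} J(\theta) = \frac{1}{m}\, X^{\top}\bigl(\sigma(X\theta) - y\bigr), \qquad \theta \leftarrow \theta - \alpha\, \nabla_{\theta} J(\theta),$$

which is exactly `np.dot(x.T, h - y) / y.size` followed by `theta = theta - alpha * gradient`.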
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
# Sha-256 of pytorch_model.bin on the top of `main`, for checking purposes


@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")


class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None


class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
| 710 |
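A minimal sketch of the two utilities these tests cover, `ContextManagers` and `find_labels`; it assumes a PyTorch install of `transformers`, and the expected outputs are taken directly from the assertions above:

# ContextManagers enters a list of context managers as a single one; find_labels
# inspects a model class's signature for its label argument names.
from transformers import BertForPreTraining
from transformers.utils import ContextManagers, find_labels

with ContextManagers([context_fr(), context_en()]):  # helpers defined above
    print("Transformers are awesome!")
# prints: Bonjour! / Welcome! / Transformers are awesome! / Bye! / Au revoir!

print(find_labels(BertForPreTraining))  # ['labels', 'next_sentence_label']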
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]


def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that will remove the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
| 264 | 0 |
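A minimal sketch of how these helpers compose into an `accelerate config`-style prompt; the prompt text and error message are illustrative:

# Hypothetical prompt: _ask_field keeps asking until _convert_yes_no_to_bool
# accepts the answer (a KeyError from the converter triggers a re-ask).
use_cpu = _ask_field(
    "Do you want to run on CPU only? [yes/NO]: ",
    _convert_yes_no_to_bool,
    default=False,
    error_message="Please enter yes or no.",
)
print(use_cpu)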