| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
def solution(n: int = 100) -> int:
    """Return the number of distinct terms generated by a**b for 2 <= a, b <= n."""
    collect_powers = set()
    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)


if __name__ == "__main__":
    print("Number of terms:", solution(int(str(input()).strip())))
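# Sanity check (illustrative, not part of the original script): for n = 5 the
# distinct values of a**b with 2 <= a, b <= 5 number 15, since 4**2 duplicates
# 2**4, so solution(5) == 15.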
| 29 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
    def test_1b_lyrics_tokenizer(self) -> None:
        """Compare tokenization of `metas` against reference ids for the 1b-lyrics checkpoint."""
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]]),
torch.tensor([[0, 0, 0, 10_69, 11]]),
torch.tensor([[0, 0, 0, 10_69, 11]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
@require_torch
    def test_5b_lyrics_tokenizer(self) -> None:
        """Compare tokenization of `metas` against reference ids for the 5b-lyrics checkpoint."""
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]]),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]]),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
| 21 | 0 |
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class FFT:
    def __init__(self, poly_a=None, poly_b=None) -> None:
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)
        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    # Discrete fourier transform of A and B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]

        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    # Multiply the DFTs of A and B and find A*B
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverse_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverse_c[0]) <= 1:
            return inverse_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverse_c[i][j]
                            + inverse_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverse_c[i][j]
                            - inverse_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverse_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverse_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverse_c]

        # Remove leading 0's
        while inverse_c[-1] == 0:
            inverse_c.pop()
        return inverse_c

    # Overwrite __str__ for print(); shows A, B and A*B
    def __str__(self) -> str:
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product)
        )
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
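# A quick usage sketch (illustrative; coefficients are in ascending order of
# power, so [1, 2, 3] means 1 + 2x + 3x^2):
#
#   fft = FFT(poly_a=[1, 2, 3], poly_b=[4, 5])
#   fft.product  # [4, 13, 22, 15] as complex values with zero imaginary part,
#                # i.e. A*B = 4 + 13x + 22x^2 + 15x^3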
| 312 | """simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name: str) -> str:
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")
    return name
def convert_state_dict(orig_state_dict, config):
    # NOTE: the destination key names below assume the HF GroupViT layout
    # produced by rename_key above; they were lost in this copy of the code.
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False
):
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
parser.add_argument(
'''--model_name''',
        default="groupvit-gcc-yfcc",
type=str,
help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
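# Example invocation (script and file names are illustrative):
#   python convert_groupvit_nvlab_to_hf.py --checkpoint_path ./groupvit_gcc_yfcc.pth \
#       --pytorch_dump_folder_path ./groupvit-gcc-yfcc --model_name groupvit-gcc-yfcc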
| 312 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
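# Usage sketch (illustrative): the defaults describe a 6-layer, 256-dim decoder.
#   config = Speech2Text2Config()
#   config.hidden_size  # 256, resolved via the attribute_map alias for d_model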
| 62 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self) -> None:
        self.head: Node | None = None
        self.tail: Node | None = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def test_circular_linked_list() -> None:
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
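# Quick usage sketch (values are illustrative):
#   cll = CircularLinkedList()
#   cll.insert_tail(1); cll.insert_tail(2); cll.insert_head(0)
#   repr(cll)  # '0->1->2'; iterating stops after one full loop around the ring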
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 96 | 0 |
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image to a given level."""

    def brightness(c: int) -> float:
        # Fundamental transformation applied to every pixel value.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
| 265 |
def remove_digit(num: int) -> int:
    """Return the largest number obtainable by removing exactly one digit."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )
if __name__ == "__main__":
__import__('doctest').testmod()
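# Example (illustrative): remove_digit(152) == 52, since dropping the leading
# digit leaves 52, versus 12 or 15 for the other two choices.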
| 265 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"""
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
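# Usage sketch (illustrative): RoFormerConfig() gives the 12-layer base settings,
# and RoFormerOnnxConfig(config).inputs names the dynamic batch/sequence axes
# expected during ONNX export:
#   RoFormerOnnxConfig(RoFormerConfig()).inputs
#   # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ...])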
| 330 |
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")
class DisjointSetTreeNode(Generic[T]):
    # Disjoint Set Node to store the parent and rank
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    # Disjoint Set DataStructure
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> "GraphUndirectedWeighted[T]":
        # Kruskal's algorithm: collect each undirected edge once, sort by weight
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
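# A small usage sketch (node labels and weights are illustrative):
#   g = GraphUndirectedWeighted[int]()
#   g.add_edge(1, 2, 1)
#   g.add_edge(2, 3, 2)
#   g.add_edge(1, 3, 3)
#   mst = g.kruskal()  # keeps the two cheapest edges, (1, 2) and (2, 3)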
| 330 | 1 |
"""simple docstring"""
from sklearn.metrics import mean_squared_error
import datasets
UpperCAmelCase ="\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
UpperCAmelCase ="\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
UpperCAmelCase ="\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
| 371 |
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
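# Example invocation (script name and output path are illustrative):
#   python convert_unclip_txt2img_to_image_variation.py --dump_path ./unclip-image-variation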
| 77 | 0 |
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)


class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000

        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
        # Extracts log-mel filterbank (MFSC) features for one unbatched waveform.
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=True,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
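# Usage sketch (waveform values are illustrative):
#   extractor = MCTCTFeatureExtractor()
#   feats = extractor(np.random.randn(16000), sampling_rate=16000)
#   np.asarray(feats["input_features"][0]).shape  # (num_frames, 80)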
| 334 |
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the input values."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Tests the or_gate function."""
assert or_gate(0, 0 ) == 0
assert or_gate(0, 1 ) == 1
assert or_gate(1, 0 ) == 1
assert or_gate(1, 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 334 | 1 |
"""simple docstring"""
_UpperCamelCase = [
"""Audio""",
"""Array2D""",
"""Array3D""",
"""Array4D""",
"""Array5D""",
"""ClassLabel""",
"""Features""",
"""Sequence""",
"""Value""",
"""Image""",
"""Translation""",
"""TranslationVariableLanguages""",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 367 |
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Return the element-wise sigmoid 1 / (1 + exp(-x)) of the input vector."""
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
import doctest
doctest.testmod()
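# Example (illustrative): the function is vectorized, so
#   sigmoid(np.array([-1.0, 0.0, 1.0]))  # -> array([0.26894142, 0.5, 0.73105858])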
| 234 | 0 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
| 247 | """simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
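# Usage sketch (checkpoint name is illustrative):
#   processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#   batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   batch.keys()  # input_ids, attention_mask, pixel_values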
| 172 | 0 |
'''simple docstring'''
def add(first: int, second: int) -> int:
    """Add two integers using only bitwise operations."""
    while second != 0:
        c = first & second  # carry bits
        first ^= second  # sum without carry
        second = c << 1  # carry shifted into position
    return first
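# Worked example (illustrative): add(5, 3)
#   c = 5 & 3 = 1, first = 5 ^ 3 = 6, second = 1 << 1 = 2
#   c = 6 & 2 = 2, first = 6 ^ 2 = 4, second = 2 << 1 = 4
#   c = 4 & 4 = 4, first = 4 ^ 4 = 0, second = 4 << 1 = 8
#   c = 0 & 8 = 0, first = 0 ^ 8 = 8, second = 0  ->  returns 8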
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
| 368 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def a__ ( self :List[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np"
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np"
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=scheduler, torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type="np"
        )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9 | 8 | 0 |
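# A minimal usage sketch distilled from the inpainting tests above. The checkpoint
# name matches the tests; "init.png" / "mask.png" are hypothetical local files
# standing in for the dataset images.
import torch
from PIL import Image
from diffusers import StableDiffusionInpaintPipeline

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
)
pipe.to("cuda")
result = pipe(
    prompt="Face of a yellow cat, high resolution, sitting on a park bench",
    image=Image.open("init.png").convert("RGB"),
    mask_image=Image.open("mask.png").convert("RGB"),
    generator=torch.manual_seed(0),
)
result.images[0].save("inpainted.png")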
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    """Automatic mask generation pipeline using a SAM-style model that predicts segmentation masks for an image."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        forward_params = {}
        postprocess_kwargs = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # forward args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        # postprocess args
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )
        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward(self, model_inputs, pred_iou_thresh=0.88, stability_score_thresh=0.95, mask_threshold=0, stability_score_offset=1):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0], iou_scores[0], original_sizes[0], input_boxes[0], pred_iou_thresh, stability_score_thresh, mask_threshold, stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def postprocess(self, model_outputs, output_rle_mask=False, output_bboxes_mask=False, crops_nms_thresh=0.7):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
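    # A minimal sketch of invoking this pipeline through the high-level factory;
    # the SAM checkpoint name and "scene.jpg" path are illustrative stand-ins:
    #
    #     from transformers import pipeline
    #
    #     generator = pipeline("mask-generation", model="facebook/sam-vit-base", device=0)
    #     outputs = generator("scene.jpg", points_per_batch=64, pred_iou_thresh=0.9)
    #     print(len(outputs["masks"]), outputs["scores"][:3])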
| 116 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        return model
    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModel(config)
    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_harm_safe_stable_diffusion(self):
__UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=lowercase )
__UpperCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
__UpperCamelCase = 4_0_0_3_6_6_0_3_4_6
__UpperCamelCase = 7
# without safety guidance (sld_guidance_scale = 0)
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        # with safety guidance (strong configuration)
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_nudity_safe_stable_diffusion(self):
__UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=lowercase )
__UpperCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = """padme amidala taking a bath artwork, safe for work, no nudity"""
__UpperCamelCase = 2_7_3_4_9_7_1_7_5_5
__UpperCamelCase = 7
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_nudity_safetychecker_safe_stable_diffusion(self):
__UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
__UpperCamelCase = 1_0_4_4_3_5_5_2_3_4
__UpperCamelCase = 1_2
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
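    # A minimal sketch of the safe pipeline under test, with the same
    # safe-latent-diffusion knobs the nightly tests pass above; treat the exact
    # values as illustrative rather than recommended defaults:
    #
    #     sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
    #     sd_pipe = sd_pipe.to("cuda")
    #     output = sd_pipe(
    #         ["portrait photograph, natural light"],
    #         generator=torch.manual_seed(0),
    #         guidance_scale=7.0,
    #         num_inference_steps=50,
    #         output_type="np",
    #         sld_guidance_scale=2000,
    #         sld_warmup_steps=7,
    #         sld_threshold=0.025,
    #         sld_momentum_scale=0.5,
    #         sld_mom_beta=0.7,
    #     )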
| 349 | 0 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"])
        # fmt: on
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6]
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."])
@slow
    def test_tokenizer_integration(self):
        # Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
"""Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """
"""general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural """
"""Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """
"""models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""",
"""BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """
"""conditioning on both left and right context in all layers.""",
"""The quick brown fox jumps over the lazy dog.""",
]
# fmt: off
        expected_encoding = {
"""input_ids""": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="microsoft/speecht5_asr", revision="c5ef64c71905caeccde0e4462ef3f9077224c524", sequences=sequences
        )
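    # A minimal round-trip sketch with the tokenizer exercised above; the
    # checkpoint name is the one referenced in the integration test:
    #
    #     tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
    #     ids = tokenizer("this is a test").input_ids
    #     print(ids)
    #     print(tokenizer.decode(ids, skip_special_tokens=True))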
| 168 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, use_stable_embedding=True,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
        )
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
    def test_save_load_fast_init_from_base(self):
        pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
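    # A sketch of the rope_scaling configuration the parameterized test above
    # exercises: "linear" stretches positions immediately, while "dynamic" NTK
    # scaling only changes the embeddings once an input exceeds the original
    # maximum length.
    #
    #     config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 10.0})
    #     config = OpenLlamaConfig(rope_scaling={"type": "dynamic", "factor": 10.0})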
| 168 | 1 |
"""simple docstring"""
from collections.abc import Callable
class Heap:
    """A generic heap that also supports updating and deleting arbitrary items."""

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis
        # ordering will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        """Returns the parent index of the given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        """Returns the left-child index of the given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        """Returns the right-child index of the given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        """Performs the changes required for swapping two elements in the heap."""
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares the two items using default comparison."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """Returns the index of a valid parent as per desired ordering among the given index and both of its children."""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fixes the heap in the upward direction of the given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fixes the heap in the downward direction of the given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item, item_value) -> None:
        """Updates the given item's value in the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item) -> None:
        """Deletes the given item from the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        # Move the last item into the freed slot and update its position.
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change - so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item, item_value) -> None:
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> tuple | None:
        """Returns the top (item, value) tuple if the heap is non-empty."""
        return tuple(self.arr[0]) if self.size else None

    def extract_top(self) -> tuple | None:
        """Returns and removes the top (item, value) tuple if present."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
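# A short usage sketch for the heap above: it is a min-heap by default; pass a
# key such as ``lambda x: -x`` to order by the negated value instead.
def _demo() -> None:
    heap = Heap()
    heap.insert_item("a", 3)
    heap.insert_item("b", 1)
    heap.insert_item("c", 2)
    assert heap.get_top() == ("b", 1)
    heap.update_item("b", 5)  # keys can be increased or decreased in O(log n)
    assert heap.extract_top() == ("c", 2)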
if __name__ == "__main__":
    import doctest

    doctest.testmod() | 243 |
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50003
PYTHON_CODE = 50002
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_base_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]
        self.assertListEqual(language_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"])
        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False), code
        )
    def test_full_multi_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]
        self.assertListEqual(
            language_tokens, ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"]
        )
        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False), code
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
snake_case_ = """uclanlp/plbart-python-en_XX"""
snake_case_ = [
"""def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])""",
"""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""",
]
snake_case_ = [
"""Returns the maximum value of a b c.""",
"""Sums the values of a b c.""",
]
snake_case_ = [
134,
5452,
3_3460,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
988,
20,
3_3456,
19,
3_3456,
771,
39,
4258,
889,
3318,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def __magic_name__ ( cls : List[str] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : PLBartTokenizer =PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes='''base''' , src_lang='''python''' , tgt_lang='''en_XX''' )
SCREAMING_SNAKE_CASE__ : int =1
return cls
def __magic_name__ ( self : int ) -> Tuple:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__java__'''] , 5_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__python__'''] , 5_00_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__en_XX__'''] , 5_00_03 )
def __magic_name__ ( self : str ) -> Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __lowercase )
def __magic_name__ ( self : List[str] ) -> Tuple:
self.assertIn(EN_CODE , self.tokenizer.all_special_ids )
SCREAMING_SNAKE_CASE__ : Tuple =[EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2]
SCREAMING_SNAKE_CASE__ : int =self.tokenizer.decode(__lowercase , skip_special_tokens=__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertNotIn(self.tokenizer.eos_token , __lowercase )
def __magic_name__ ( self : List[str] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Tuple =['''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''' * 20]
self.assertIsInstance(src_text[0] , __lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =10
SCREAMING_SNAKE_CASE__ : Tuple =self.tokenizer(__lowercase , max_length=__lowercase , truncation=__lowercase ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __lowercase )
self.assertEqual(len(__lowercase ) , __lowercase )
def __magic_name__ ( self : Any ) -> Tuple:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''__java__'''] ) , [5_00_04, 5_00_01] )
def __magic_name__ ( self : str ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =PLBartTokenizer.from_pretrained(__lowercase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __lowercase )
@require_torch
def __magic_name__ ( self : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE__ : List[str] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__lowercase , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ : str =shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , __lowercase )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def __magic_name__ ( self : Optional[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Dict =self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__lowercase , truncation=__lowercase , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE__ : Dict =shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(__lowercase , __lowercase )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
SCREAMING_SNAKE_CASE__ : Optional[Any] =batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __lowercase )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def __magic_name__ ( self : Tuple ) -> int:
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.tokenizer(self.src_text , padding=__lowercase , truncation=__lowercase , max_length=3 , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.tokenizer(
text_target=self.tgt_text , padding=__lowercase , truncation=__lowercase , max_length=10 , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ : Dict =targets['''input_ids''']
SCREAMING_SNAKE_CASE__ : Union[str, Any] =shift_tokens_right(__lowercase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __magic_name__ ( self : Tuple ) -> str:
SCREAMING_SNAKE_CASE__ : str =self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''java''' )
self.assertEqual(
nested_simplify(__lowercase ) , {
# A, test, EOS, en_XX
'''input_ids''': [[1_50, 2_42, 2, 5_00_03]],
'''attention_mask''': [[1, 1, 1, 1]],
# java
'''forced_bos_token_id''': 5_00_01,
} , )
| 152 | 0 |
def pancake_sort(arr):
    """Sort a list with the pancake sort algorithm."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi so the maximum moves to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the whole unsorted prefix so the maximum lands at position cur - 1
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(pancake_sort(unsorted))
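# Worked example of the two flips inside the loop above, for [3, 1, 4, 2] with
# cur == 4: the maximum 4 sits at index 2; the first flip brings it to the front
# ([4, 1, 3, 2]) and the second flip of the whole prefix sends it to the back
# ([2, 3, 1, 4]), its final sorted position.
assert pancake_sort([3, 1, 4, 2]) == [1, 2, 3, 4]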
| 368 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
lowerCAmelCase : List[str] = """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def tpu_command_parser( subparsers=None ):
if subparsers is not None:
parser = subparsers.add_parser("tpu-config" , description=_description )
else:
parser = argparse.ArgumentParser("Accelerate tpu-config command" , description=_description )
# Core arguments
config_args = parser.add_argument_group(
"Config Arguments" , "Arguments that can be configured through `accelerate config`." )
config_args.add_argument(
"--config_file" , type=str , default=None , help="Path to the config file to use for accelerate." , )
config_args.add_argument(
"--tpu_name" , default=None , help="The name of the TPU to use. If not specified, will use the TPU specified in the config file." , )
config_args.add_argument(
"--tpu_zone" , default=None , help="The zone of the TPU to use. If not specified, will use the zone specified in the config file." , )
pod_args = parser.add_argument_group("TPU Arguments" , "Arguments for options run inside the TPU." )
pod_args.add_argument(
"--use_alpha" , action="store_true" , help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`." , )
pod_args.add_argument(
"--command_file" , default=None , help="The path to the file containing the commands to run on the pod on startup." , )
pod_args.add_argument(
"--command" , action="append" , nargs="+" , help="A command to run on the pod. Can be passed multiple times." , )
pod_args.add_argument(
"--install_accelerate" , action="store_true" , help="Whether to install accelerate on the pod. Defaults to False." , )
pod_args.add_argument(
"--accelerate_version" , default="latest" , help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub." , )
pod_args.add_argument(
"--debug" , action="store_true" , help="If set, will print the command that would be run instead of running it." )
if subparsers is not None:
parser.set_defaults(func=tpu_command_launcher )
return parser
def tpu_command_launcher( args ):
defaults = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(default_config_file ):
defaults = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
args.command_file = defaults.command_file
if not args.command and defaults.commands is not None:
args.command = defaults.commands
if not args.tpu_name:
args.tpu_name = defaults.tpu_name
if not args.tpu_zone:
args.tpu_zone = defaults.tpu_zone
if args.accelerate_version == "dev":
SCREAMING_SNAKE_CASE_: Tuple = "git+https://github.com/huggingface/accelerate.git"
elif args.accelerate_version == "latest":
SCREAMING_SNAKE_CASE_: str = "accelerate -U"
elif isinstance(parse(args.accelerate_version ) , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: List[str] = f"accelerate=={args.accelerate_version}"
if not args.command_file and not args.command:
raise ValueError("You must specify either a command file or a command to run on the pod." )
if args.command_file:
with open(args.command_file , "r" ) as f:
args.command = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , list ):
args.command = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
SCREAMING_SNAKE_CASE_: Dict = ["cd /usr/share"]
if args.install_accelerate:
new_cmd += [f"pip install {args.accelerate_version}"]
new_cmd += args.command
SCREAMING_SNAKE_CASE_: List[str] = "; ".join(_UpperCAmelCase )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
SCREAMING_SNAKE_CASE_: int = ["gcloud"]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f"Running {' '.join(_UpperCAmelCase )}" )
return
subprocess.run(_UpperCAmelCase )
print("Successfully setup pod." )
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args )
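# For reference, the gcloud invocation assembled above looks roughly like the
# following (TPU name, zone, and training command are illustrative placeholders):
#
#   gcloud compute tpus tpu-vm ssh my-tpu --zone us-central1-a \
#       --command "cd /usr/share; pip install accelerate -U; accelerate launch train.py" \
#       --worker all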
| 127 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = '▁'
lowerCAmelCase_ = {'vocab_file': 'sentencepiece.bpe.model'}
lowerCAmelCase_ = {
'vocab_file': {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
}
}
lowerCAmelCase_ = {
'facebook/xglm-564M': 2_048,
}
class __A ( PreTrainedTokenizer ):
'''simple docstring'''
lowerCAmelCase : List[Any] = VOCAB_FILES_NAMES
lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase : int = ["input_ids", "attention_mask"]
def __init__( self : int ,_snake_case : Dict ,_snake_case : Dict="<s>" ,_snake_case : Dict="</s>" ,_snake_case : str="</s>" ,_snake_case : Optional[Any]="<s>" ,_snake_case : Optional[Any]="<unk>" ,_snake_case : Optional[int]="<pad>" ,_snake_case : Optional[Dict[str, Any]] = None ,**_snake_case : str ,) -> None:
"""simple docstring"""
lowercase__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
lowercase__ : Any = 7
lowercase__ : Optional[int] = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
lowercase__ : Dict = kwargs.get('''additional_special_tokens''' ,[] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=_snake_case ,eos_token=_snake_case ,unk_token=_snake_case ,sep_token=_snake_case ,cls_token=_snake_case ,pad_token=_snake_case ,sp_model_kwargs=self.sp_model_kwargs ,**_snake_case ,)
lowercase__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_snake_case ) )
lowercase__ : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase__ : Optional[int] = 1
# Mimic fairseq token-to-id alignment for the first 4 token
lowercase__ : Optional[int] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
lowercase__ : List[str] = len(self.sp_model )
lowercase__ : Tuple = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(_snake_case )
lowercase__ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : int ) -> Optional[int]:
"""simple docstring"""
lowercase__ : List[Any] = self.__dict__.copy()
lowercase__ : Optional[int] = None
lowercase__ : Any = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Dict ,_snake_case : List[str] ) -> Any:
"""simple docstring"""
lowercase__ : int = d
# for backward compatibility
if not hasattr(self ,'''sp_model_kwargs''' ):
lowercase__ : Dict = {}
lowercase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def UpperCAmelCase ( self : Any ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
lowercase__ : Optional[Any] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def UpperCAmelCase ( self : Any ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ,_snake_case : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_snake_case ,token_ids_a=_snake_case ,already_has_special_tokens=_snake_case )
if token_ids_a is None:
return [1] + ([0] * len(_snake_case ))
return [1] + ([0] * len(_snake_case )) + [1, 1] + ([0] * len(_snake_case ))
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[int] ,_snake_case : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowercase__ : List[Any] = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def UpperCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase ( self : List[Any] ,_snake_case : str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(_snake_case ,out_type=_snake_case )
def UpperCAmelCase ( self : int ,_snake_case : Optional[int] ) -> List[Any]:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase__ : Tuple = self.sp_model.PieceToId(_snake_case )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCAmelCase ( self : Any ,_snake_case : List[str] ) -> Any:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCAmelCase ( self : Tuple ,_snake_case : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = ''''''.join(_snake_case ).replace(SPIECE_UNDERLINE ,''' ''' ).strip()
return out_string
def UpperCAmelCase ( self : Any ,_snake_case : str ,_snake_case : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_snake_case ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ : Any = os.path.join(
_snake_case ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(_snake_case ,'''wb''' ) as fi:
lowercase__ : Dict = self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (out_vocab_file,)
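# Worked example of the fairseq/spm alignment documented in __init__ above, with
# fairseq_offset == 1: the four control tokens are pinned explicitly, so
# convert_tokens_to_ids gives "<s>" -> 0, "<pad>" -> 1, "</s>" -> 2, "<unk>" -> 3,
# while any other piece p maps to sp_model.PieceToId(p) + fairseq_offset; the
# first real spm piece "," (spm id 3) therefore becomes fairseq id 4.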
| 16 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    """Raise a fake CUDA out-of-memory error for the tests below."""
    raise RuntimeError('CUDA out of memory.' )


class ModelForTest(nn.Module):
    def __init__( self ):
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )

    def forward( self , x ):
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase ( self ):
"""simple docstring"""
batch_sizes = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(batch_size ):
nonlocal batch_sizes
batch_sizes.append(batch_size )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(batch_sizes , [128, 64, 32, 16, 8] )
def UpperCamelCase ( self ):
"""simple docstring"""
batch_sizes = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(batch_size , arga ):
nonlocal batch_sizes
batch_sizes.append(batch_size )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
bs , arga = mock_training_loop_function('hello' )
self.assertListEqual(batch_sizes , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, 'hello'] )
def UpperCamelCase ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(UpperCAmelCase ):
pass
with self.assertRaises(RuntimeError ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def UpperCamelCase ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(batch_size ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(RuntimeError ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def UpperCamelCase ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(batch_size , arg1 , arg2 ):
if batch_size != 8:
raise_fake_out_of_memory()
with self.assertRaises(TypeError ) as cm:
mock_training_loop_function(128 , 'hello' , 'world' )
self.assertIn('Batch size was passed into `f`' , cm.exception.args[0] )
self.assertIn('`f(arg1=\'hello\', arg2=\'world\')' , cm.exception.args[0] )
def UpperCamelCase ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(UpperCAmelCase ):
raise ValueError('Oops, we had an error!' )
with self.assertRaises(ValueError ) as cm:
mock_training_loop_function()
self.assertIn('Oops, we had an error!' , cm.exception.args[0] )
@require_cuda
def UpperCamelCase ( self ):
"""simple docstring"""
starting_memory = torch.cuda.memory_allocated()
model = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , starting_memory )
model = release_memory(model )
self.assertEqual(torch.cuda.memory_allocated() , starting_memory )
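# A minimal sketch of how `find_executable_batch_size` is used outside of tests:
# the decorated function takes `batch_size` as its first argument, is called with
# that argument omitted, and is retried with a halved batch size whenever it
# raises a CUDA out-of-memory error, as the mocked loops above demonstrate.
@find_executable_batch_size(starting_batch_size=128)
def train_sketch(batch_size):
    # ... build dataloaders and run the training loop with `batch_size` ...
    return batch_size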
| 39 | 0 |
import copy
import re
class lowerCamelCase_ :
'''simple docstring'''
a__ : Optional[int] = """hp"""
a__ : Any = {}
a__ : List[str] = None
@classmethod
def UpperCamelCase__ ( cls , __lowercase , __lowercase) -> Optional[Any]:
__UpperCamelCase :List[str] = prefix
__UpperCamelCase :Optional[Any] = defaults
cls.build_naming_info()
@staticmethod
def UpperCamelCase__ ( __lowercase , __lowercase) -> List[str]:
if len(__lowercase) == 0:
return ""
__UpperCamelCase :List[str] = None
if any(char.isdigit() for char in word):
raise Exception(f"""Parameters should not contain numbers: '{word}' contains a number""")
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(__lowercase) + 1):
__UpperCamelCase :Dict = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
__UpperCamelCase :Dict = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(__lowercase):
__UpperCamelCase :List[str] = ''''''
while integer != 0:
__UpperCamelCase :List[str] = chr(ord('''A''') + integer % 10) + s
integer //= 10
return s
__UpperCamelCase :str = 0
while True:
__UpperCamelCase :List[str] = word + '''#''' + int_to_alphabetic(__lowercase)
if sword in info["reverse_short_word"]:
continue
else:
__UpperCamelCase :List[Any] = sword
break
__UpperCamelCase :Any = short_word
__UpperCamelCase :int = word
return short_word
@staticmethod
def UpperCamelCase__ ( __lowercase , __lowercase) -> Optional[int]:
__UpperCamelCase :Any = param_name.split('''_''')
__UpperCamelCase :Optional[int] = [TrialShortNamer.shortname_for_word(__lowercase , __lowercase) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
__UpperCamelCase :Any = ['''''', '''_''']
for separator in separators:
__UpperCamelCase :List[Any] = separator.join(__lowercase)
if shortname not in info["reverse_short_param"]:
__UpperCamelCase :str = shortname
__UpperCamelCase :Tuple = param_name
return shortname
return param_name
@staticmethod
def UpperCamelCase__ ( __lowercase , __lowercase) -> Optional[Any]:
__UpperCamelCase :List[Any] = TrialShortNamer.shortname_for_key(__lowercase , __lowercase)
__UpperCamelCase :int = short_name
__UpperCamelCase :List[Any] = param_name
@classmethod
def UpperCamelCase__ ( cls) -> int:
if cls.NAMING_INFO is not None:
return
__UpperCamelCase :Optional[Any] = {
'''short_word''': {},
'''reverse_short_word''': {},
'''short_param''': {},
'''reverse_short_param''': {},
}
__UpperCamelCase :Dict = list(cls.DEFAULTS.keys())
for k in field_keys:
cls.add_new_param_name(__lowercase , __lowercase)
__UpperCamelCase :Any = info
@classmethod
def UpperCamelCase__ ( cls , __lowercase) -> Union[str, Any]:
cls.build_naming_info()
assert cls.PREFIX is not None
__UpperCamelCase :Optional[int] = [copy.copy(cls.PREFIX)]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f"""You should provide a default value for the param name {k} with value {v}""")
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
__UpperCamelCase :Tuple = cls.NAMING_INFO['''short_param'''][k]
if isinstance(__lowercase , __lowercase):
__UpperCamelCase :List[Any] = 1 if v else 0
__UpperCamelCase :Optional[int] = '''''' if isinstance(__lowercase , (int, float)) else '''-'''
__UpperCamelCase :Dict = f"""{key}{sep}{v}"""
name.append(__lowercase)
return "_".join(__lowercase)
@classmethod
def UpperCamelCase__ ( cls , __lowercase) -> List[Any]:
__UpperCamelCase :List[str] = repr[len(cls.PREFIX) + 1 :]
if repr == "":
__UpperCamelCase :Dict = []
else:
__UpperCamelCase :List[Any] = repr.split('''_''')
__UpperCamelCase :List[str] = {}
for value in values:
if "-" in value:
__UpperCamelCase , __UpperCamelCase :List[str] = value.split('''-''')
else:
__UpperCamelCase :Union[str, Any] = re.sub('''[0-9.]''' , '''''' , __lowercase)
__UpperCamelCase :int = float(re.sub('''[^0-9.]''' , '''''' , __lowercase))
__UpperCamelCase :int = cls.NAMING_INFO['''reverse_short_param'''][p_k]
__UpperCamelCase :List[str] = p_v
for k in cls.DEFAULTS:
if k not in parameters:
__UpperCamelCase :str = cls.DEFAULTS[k]
return parameters
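# Worked example of the naming scheme above, assuming the mangled assignment
# targets are restored to the names used in the method bodies: a subclass with
# PREFIX = "hp" and DEFAULTS = {"learning_rate": 1e-3, "batch_size": 8} would
# serialize {"learning_rate": 1e-4, "batch_size": 8} to something like
# "hp_lr0.0001": "learning_rate" shortens to "lr", numeric values are appended
# with no separator, and "batch_size" is dropped because it equals its default.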
| 105 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
BERT_BASE_CASED = '''bert-base-cased'''
PEGASUS_XSUM = '''google/pegasus-xsum'''
ARTICLES = [''' Sam ate lunch today.''', '''Sams lunch ingredients.''']
SUMMARIES = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee''']
T5_TINY = '''patrickvonplaten/t5-tiny-random'''
BART_TINY = '''sshleifer/bart-tiny-random'''
MBART_TINY = '''sshleifer/tiny-mbart'''
MARIAN_TINY = '''sshleifer/tiny-marian-en-de'''
def _dump_articles( path , articles ):
    content = '''\n'''.join(articles )
    Path(path ).open('''w''' ).writelines(content )


def make_test_data_dir( tmp_dir ):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir , f"""{split}.source""" ) , ARTICLES )
        _dump_articles(os.path.join(tmp_dir , f"""{split}.target""" ) , SUMMARIES )
    return tmp_dir
class lowerCamelCase_ ( TestCasePlus ):
'''simple docstring'''
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def UpperCamelCase__ ( self , __lowercase) -> List[Any]:
__UpperCamelCase :Dict = AutoTokenizer.from_pretrained(__lowercase)
__UpperCamelCase :Optional[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
__UpperCamelCase :List[Any] = max(len(tokenizer.encode(__lowercase)) for a in ARTICLES)
__UpperCamelCase :Optional[int] = max(len(tokenizer.encode(__lowercase)) for a in SUMMARIES)
__UpperCamelCase :int = 4
__UpperCamelCase :Any = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
src_lang , tgt_lang = '''ro_RO''', '''de_DE''' # ignored for all but mbart, but never causes error.
__UpperCamelCase :str = SeqaSeqDataset(
__lowercase , data_dir=__lowercase , type_path='''train''' , max_source_length=__lowercase , max_target_length=__lowercase , src_lang=__lowercase , tgt_lang=__lowercase , )
__UpperCamelCase :Any = DataLoader(__lowercase , batch_size=2 , collate_fn=train_dataset.collate_fn)
for batch in dataloader:
assert isinstance(__lowercase , __lowercase)
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
__UpperCamelCase :Optional[int] = shift_tokens_right(batch['''labels'''] , tokenizer.pad_token_id)
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED])
def UpperCamelCase__ ( self , __lowercase) -> int:
__UpperCamelCase :Union[str, Any] = AutoTokenizer.from_pretrained(__lowercase)
__UpperCamelCase :Union[str, Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
__UpperCamelCase :int = max(len(tokenizer.encode(__lowercase)) for a in ARTICLES)
__UpperCamelCase :Dict = max(len(tokenizer.encode(__lowercase)) for a in SUMMARIES)
__UpperCamelCase :Union[str, Any] = 4
__UpperCamelCase :List[str] = LegacySeqaSeqDataset(
__lowercase , data_dir=__lowercase , type_path='''train''' , max_source_length=20 , max_target_length=__lowercase , )
__UpperCamelCase :Dict = DataLoader(__lowercase , batch_size=2 , collate_fn=train_dataset.collate_fn)
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :List[Any] = AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''')
__UpperCamelCase :Union[str, Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
__UpperCamelCase :str = tmp_dir.joinpath('''train.source''').open().readlines()
__UpperCamelCase :int = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
pack_data_dir(__lowercase , __lowercase , 128 , __lowercase)
__UpperCamelCase :Union[str, Any] = {x.name for x in tmp_dir.iterdir()}
__UpperCamelCase :int = {x.name for x in save_dir.iterdir()}
__UpperCamelCase :Optional[int] = save_dir.joinpath('''train.source''').open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(__lowercase) < len(__lowercase)
assert len(__lowercase) == 1
assert len(packed_examples[0]) == sum(len(__lowercase) for x in orig_examples)
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='''This test requires fairseq''')
def UpperCamelCase__ ( self) -> List[Any]:
if not FAIRSEQ_AVAILABLE:
return
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Any = self._get_dataset(max_len=64)
__UpperCamelCase :Union[str, Any] = 64
__UpperCamelCase :Tuple = ds.make_dynamic_sampler(__lowercase , required_batch_size_multiple=__lowercase)
__UpperCamelCase :List[str] = [len(__lowercase) for x in batch_sampler]
assert len(set(__lowercase)) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(__lowercase) == len(__lowercase) # no dropped or added examples
__UpperCamelCase :int = DataLoader(__lowercase , batch_sampler=__lowercase , collate_fn=ds.collate_fn , num_workers=2)
__UpperCamelCase :List[str] = []
__UpperCamelCase :int = []
for batch in data_loader:
__UpperCamelCase :List[Any] = batch['''input_ids'''].shape
__UpperCamelCase :Dict = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
__UpperCamelCase :Optional[int] = np.product(batch['''input_ids'''].shape)
num_src_per_batch.append(__lowercase)
if num_src_tokens > (max_tokens * 1.1):
failures.append(__lowercase)
assert num_src_per_batch[0] == max(__lowercase)
if failures:
raise AssertionError(f"""too many tokens in {len(__lowercase)} batches""")
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Optional[Any] = self._get_dataset(max_len=512)
__UpperCamelCase :Any = 2
__UpperCamelCase :List[Any] = ds.make_sortish_sampler(__lowercase , shuffle=__lowercase)
__UpperCamelCase :List[Any] = DataLoader(__lowercase , batch_size=__lowercase , collate_fn=ds.collate_fn , num_workers=2)
__UpperCamelCase :Tuple = DataLoader(__lowercase , batch_size=__lowercase , collate_fn=ds.collate_fn , num_workers=2 , sampler=__lowercase)
__UpperCamelCase :int = tokenizer.pad_token_id
def count_pad_tokens(__lowercase , __lowercase="input_ids"):
return [batch[k].eq(__lowercase).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(__lowercase , k='''labels''')) < sum(count_pad_tokens(__lowercase , k='''labels'''))
assert sum(count_pad_tokens(__lowercase)) < sum(count_pad_tokens(__lowercase))
assert len(__lowercase) == len(__lowercase)
def UpperCamelCase__ ( self , __lowercase=1_000 , __lowercase=128) -> List[Any]:
if os.getenv('''USE_REAL_DATA''' , __lowercase):
__UpperCamelCase :Optional[Any] = '''examples/seq2seq/wmt_en_ro'''
__UpperCamelCase :Dict = max_len * 2 * 64
if not Path(__lowercase).joinpath('''train.len''').exists():
save_len_file(__lowercase , __lowercase)
else:
__UpperCamelCase :Union[str, Any] = '''examples/seq2seq/test_data/wmt_en_ro'''
__UpperCamelCase :Optional[int] = max_len * 4
save_len_file(__lowercase , __lowercase)
__UpperCamelCase :str = AutoTokenizer.from_pretrained(__lowercase)
__UpperCamelCase :List[Any] = SeqaSeqDataset(
__lowercase , data_dir=__lowercase , type_path='''train''' , max_source_length=__lowercase , max_target_length=__lowercase , n_obs=__lowercase , )
return ds, max_tokens, tokenizer
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :List[Any] = self._get_dataset()
__UpperCamelCase :List[str] = set(DistributedSortishSampler(__lowercase , 256 , num_replicas=2 , rank=0 , add_extra_examples=__lowercase))
__UpperCamelCase :Tuple = set(DistributedSortishSampler(__lowercase , 256 , num_replicas=2 , rank=1 , add_extra_examples=__lowercase))
assert idsa.intersection(__lowercase) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def UpperCamelCase__ ( self , __lowercase) -> List[Any]:
__UpperCamelCase :List[Any] = AutoTokenizer.from_pretrained(__lowercase , use_fast=__lowercase)
if tok_name == MBART_TINY:
__UpperCamelCase :Optional[Any] = SeqaSeqDataset(
__lowercase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) , type_path='''train''' , max_source_length=4 , max_target_length=8 , src_lang='''EN''' , tgt_lang='''FR''' , )
__UpperCamelCase :Tuple = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
__UpperCamelCase :Tuple = SeqaSeqDataset(
__lowercase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) , type_path='''train''' , max_source_length=4 , max_target_length=8 , )
__UpperCamelCase :Optional[int] = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(__lowercase) == 1 if tok_name == BART_TINY else len(__lowercase) == 0
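# A minimal pure-Python sketch (padding ignored) of what the imported
# `shift_tokens_right` does with mBART-style labels: the language code stored at
# the end of the label sequence wraps around to become the first decoder input
# token, which is exactly what the decoder_input_ids assertions above check.
def shift_tokens_right_sketch(label_ids):
    return [label_ids[-1]] + label_ids[:-1]


# 2 is the eos id and 99 stands in for a language-code id:
assert shift_tokens_right_sketch([10, 11, 2, 99]) == [99, 10, 11, 2]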
| 105 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
__a : List[Any] = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = 'sshleifer/tiny-gpt2'
__a : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__a , multi_process=__a , )
__a : List[Any] = TensorFlowBenchmark(__a )
__a : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = 'sgugger/tiny-distilbert-classification'
__a : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , only_pretrain_model=__a , )
__a : Dict = TensorFlowBenchmark(__a )
__a : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = 'sshleifer/tiny-gpt2'
__a : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
__a : str = TensorFlowBenchmark(__a )
__a : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = 'sshleifer/tiny-gpt2'
__a : Optional[Any] = AutoConfig.from_pretrained(__a )
__a : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__a , multi_process=__a , )
__a : Tuple = TensorFlowBenchmark(__a , [config] )
__a : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = 'sshleifer/tiny-gpt2'
__a : Optional[int] = AutoConfig.from_pretrained(__a )
__a : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
__a : Tuple = TensorFlowBenchmark(__a , [config] )
__a : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = 'sshleifer/tiny-gpt2'
__a : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
__a : Optional[Any] = TensorFlowBenchmark(__a )
__a : str = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = 'sshleifer/tiny-gpt2'
__a : Tuple = AutoConfig.from_pretrained(__a )
__a : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
__a : Optional[Any] = TensorFlowBenchmark(__a , [config] )
__a : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = 'patrickvonplaten/t5-tiny-random'
__a : Optional[int] = AutoConfig.from_pretrained(__a )
__a : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
__a : str = TensorFlowBenchmark(__a , configs=[config] )
__a : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = 'sshleifer/tiny-gpt2'
__a : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__a , multi_process=__a , )
__a : int = TensorFlowBenchmark(__a )
__a : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
__a : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__a , save_to_csv=__a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__a , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(__a , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(__a , 'env.csv' ) , multi_process=__a , )
__a : int = TensorFlowBenchmark(__a )
benchmark.run()
self.assertTrue(Path(os.path.join(__a , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__a , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__a , 'env.csv' ) ).exists() )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(__a ):
self.assertTrue(hasattr(__a , 'sequential' ) )
self.assertTrue(hasattr(__a , 'cumulative' ) )
self.assertTrue(hasattr(__a , 'current' ) )
self.assertTrue(hasattr(__a , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__a : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__a , 'log.txt' ) , log_print=__a , trace_memory_line_by_line=__a , eager_mode=__a , multi_process=__a , )
__a : Any = TensorFlowBenchmark(__a )
__a : str = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__a , 'log.txt' ) ).exists() )
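# A minimal usage sketch of the API exercised by these tests (the model id is
# illustrative; only keyword arguments that already appear above are used):
#
#   args = TensorFlowBenchmarkArguments(
#       models=['sshleifer/tiny-gpt2'], inference=True, training=False,
#       sequence_lengths=[8], batch_sizes=[1], multi_process=False,
#   )
#   results = TensorFlowBenchmark(args).run()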
| 27 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class __UpperCamelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
mod_file = inspect.getfile(accelerate.test_utils )
self.test_file_path = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
self.test_metrics = test_metrics
@require_cpu
def __UpperCAmelCase ( self ):
'''simple docstring'''
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def __UpperCAmelCase ( self ):
'''simple docstring'''
debug_launcher(self.test_metrics.main )
@require_single_gpu
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.test_metrics.main()
@require_multi_gpu
def __UpperCAmelCase ( self ):
'''simple docstring'''
print(f"""Found {torch.cuda.device_count()} devices.""" )
__a : List[Any] = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__a , env=os.environ.copy() )
| 27 | 1 |
"""simple docstring"""
from __future__ import annotations
def carrier_concentration( electron_conc :float , hole_conc :float , intrinsic_conc :float , ) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
raise ValueError('''You cannot supply more or less than 2 values''')
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''')
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''')
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''')
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
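# Worked example of the mass-action relation n * p == n_i**2 implemented above:
# the unknown quantity is passed as 0 and its name and value are returned.
assert carrier_concentration(25, 100, 0) == ("intrinsic_conc", 50.0)
assert carrier_concentration(0, 100, 50) == ("electron_conc", 25.0)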
if __name__ == "__main__":
import doctest
doctest.testmod()
| 188 |
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field( default=None , metadata=None ) -> List[Any]:
    return field(default_factory=lambda: default , metadata=metadata)
@dataclass
class PlotArguments:
'''simple docstring'''
__lowerCAmelCase = field(
metadata={'''help''': '''The csv file to plot.'''} , )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={'''help''': '''Whether to plot along batch size or sequence length. Defaults to sequence length.'''} , )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={'''help''': '''Whether the csv file has time results or memory results. Defaults to memory results.'''} , )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={'''help''': '''Disable logarithmic scale when plotting'''} , )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={
'''help''': '''Whether the csv file has training results or inference results. Defaults to inference results.'''
} , )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={'''help''': '''Filename under which the plot will be saved. If unused no plot is saved.'''} , )
__lowerCAmelCase = list_field(
default=_UpperCamelCase , metadata={'''help''': '''List of model names that are used instead of the ones in the csv file.'''} )
def can_convert_to_int( a_ :Optional[Any]) -> Any:
try:
int(a_)
return True
except ValueError:
return False
def can_convert_to_float( a_ :List[Any]) -> Any:
try:
float(a_)
return True
except ValueError:
return False
class Plot:
'''simple docstring'''
def __init__( self , _UpperCAmelCase ):
__a : Dict = args
__a : Tuple = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline='''''' ) as csv_file:
__a : int = csv.DictReader(_UpperCAmelCase )
for row in reader:
__a : Union[str, Any] = row['''model''']
self.result_dict[model_name]["bsz"].append(int(row['''batch_size'''] ) )
self.result_dict[model_name]["seq_len"].append(int(row['''sequence_length'''] ) )
if can_convert_to_int(row['''result'''] ):
# value is not None
__a : Optional[int] = int(row['''result'''] )
elif can_convert_to_float(row['''result'''] ):
# value is not None
__a : Optional[Any] = float(row['''result'''] )
def _lowerCamelCase ( self ):
__a , __a : Optional[int] = plt.subplots()
__a : str = '''Time usage''' if self.args.is_time else '''Memory usage'''
__a : str = title_str + ''' for training''' if self.args.is_train else title_str + ''' for inference'''
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale('''log''' )
ax.set_yscale('''log''' )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
__a : str = sorted(set(self.result_dict[model_name]['''bsz'''] ) )
__a : Dict = sorted(set(self.result_dict[model_name]['''seq_len'''] ) )
__a : Dict = self.result_dict[model_name]['''result''']
((__a) , (__a)) : List[Any] = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
__a : Any = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
__a : Optional[int] = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=_UpperCAmelCase , )
else:
__a : Dict = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((__a) , (__a)) : Union[str, Any] = (
('''batch_size''', '''len''') if self.args.plot_along_batch else ('''in #tokens''', '''bsz''')
)
__a : Any = np.asarray(_UpperCAmelCase , _UpperCAmelCase )[: len(_UpperCAmelCase )]
plt.scatter(
_UpperCAmelCase , _UpperCAmelCase , label=f"""{label_model_name} - {inner_loop_label}: {inner_loop_value}""" )
plt.plot(_UpperCAmelCase , _UpperCAmelCase , '''--''' )
title_str += f""" {label_model_name} vs."""
__a : Optional[Any] = title_str[:-4]
__a : Optional[int] = '''Time in s''' if self.args.is_time else '''Memory in MB'''
# plot
plt.title(_UpperCAmelCase )
plt.xlabel(_UpperCAmelCase )
plt.ylabel(_UpperCAmelCase )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def main():
    parser = HfArgumentParser(PlotArguments )
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args )
    plot.plot()
if __name__ == "__main__":
main()
| 188 | 1 |
'''simple docstring'''
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
lowercase : str = logging.get_logger(__name__)
def extract_warnings_from_single_artifact( artifact_path , targets ):
    """Extract warnings from a downloaded artifact (in .zip format) or from a plain directory."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp ):
        for line in fp:
            if isinstance(line , bytes ):
                line = line.decode('UTF-8' )
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(' ' ):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer ) > 0:
                    warning = '\n'.join(buffer )
                    # Only keep the warnings specified in `targets`
                    if any(F': {x}: ' in warning for x in targets ):
                        selected_warnings.add(warning )
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line )

    if from_gh:
        for filename in os.listdir(artifact_path ):
            file_path = os.path.join(artifact_path , filename )
            if not os.path.isdir(file_path ):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path ) as fp:
                    parse_line(fp )
    else:
        try:
            with zipfile.ZipFile(artifact_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename ) as fp:
                            parse_line(fp )
        except Exception:
            logger.warning(
                F'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.' )
    return selected_warnings


def extract_warnings( artifact_dir , targets ):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir , p ) for p in os.listdir(artifact_dir ) if (p.endswith('.zip' ) or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p , targets ) )
    return selected_warnings
if __name__ == "__main__":
def list_str( values ):
    return values.split(',' )
lowercase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
# optional parameters
parser.add_argument(
"--targets",
default="DeprecationWarning,UserWarning,FutureWarning",
type=list_str,
help="Comma-separated list of target warning(s) which we want to extract.",
)
parser.add_argument(
"--from_gh",
action="store_true",
help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
)
lowercase : Tuple = parser.parse_args()
lowercase : Optional[int] = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
lowercase : List[Any] = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("=" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
lowercase : Optional[Any] = extract_warnings(args.output_dir, args.targets)
lowercase : Tuple = sorted(selected_warnings)
with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
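# For reference, a warning survives the filter above when its rendered text
# contains a segment of the form ": <target>: ", e.g. a (hypothetical) line like
#   ".../modeling_utils.py:123: FutureWarning: this argument is deprecated"
# matches the default target list.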
| 42 |
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class TokenizedDataset(IterableDataset ):
def __init__( self , tokenizer , dataset , n_tasks=None , n_copies=1 ):
self.tokenizer = tokenizer
self.dataset = dataset
self.n_tasks = len(dataset ) if n_tasks is None else n_tasks
self.n_copies = n_copies
def __iter__( self : str ):
a : List[Any] = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
a : Dict = self.tokenizer(__snake_case , padding=__snake_case , return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class EndOfFunctionCriteria(StoppingCriteria ):
    """Custom `StoppingCriteria` that checks whether all generated sequences are complete."""

    def __init__( self , start_length , eof_strings , tokenizer ):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__( self , input_ids , scores , **kwargs ):
        """Return True once every generated sequence contains one of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
        return all(done )
def remove_last_block(string ):
    """Remove the last block of the code containing EOF_STRINGS."""
    string_list = re.split('(%s)' % '|'.join(EOF_STRINGS ) , string )
    # last string should be ""
    return "".join(string_list[:-2] )
def complete_code(accelerator , model , tokenizer , dataloader , n_tasks , batch_size=20 , **gen_kwargs ):
    """Generate multiple completions for each task in the dataset."""
    gen_token_dict = defaultdict(list )  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader ) ):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch['ids'].shape[-1]
            generated_tokens = accelerator.unwrap_model(model ).generate(
                input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=batch_size , **gen_kwargs )
            # each task is generated batch_size times
            generated_tasks = batch['task_id'].repeat(batch_size )
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens , dim=1 , pad_index=tokenizer.pad_token_id )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks) )
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks , generated_tokens ):
                gen_token_dict[task].append(generated_tokens )
    code_gens = [[] for _ in range(n_tasks )]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s , skip_special_tokens=True , clean_up_tokenization_spaces=True )
            code_gens[task].append(remove_last_block(gen_code ) )
    return code_gens
def main():
# Setup configuration
parser = HfArgumentParser(HumanEvalArguments )
args = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
a : List[Any] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
a : int = 'false'
if args.num_workers is None:
a : Dict = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
a : List[Any] = Accelerator()
set_seed(args.seed , device_specific=_A )
# Load model and tokenizer
a : Dict = AutoTokenizer.from_pretrained(args.model_ckpt )
a : str = tokenizer.eos_token
a : List[Any] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
a : Optional[Any] = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _A , _A )] ),
}
# Load evaluation dataset and metric
a : Optional[int] = load_dataset('openai_humaneval' )
a : Optional[Any] = load_metric('code_eval' )
a : Optional[int] = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
a : Optional[Any] = args.n_samples // args.batch_size
a : Any = TokenizedDataset(_A , human_eval['test'] , n_copies=_A , n_tasks=_A )
# do not confuse args.batch_size, which is actually the num_return_sequences
a : int = DataLoader(_A , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
a : int = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
a , a : int = accelerator.prepare(_A , _A )
a : int = complete_code(
_A , _A , _A , _A , n_tasks=_A , batch_size=args.batch_size , **_A , )
if accelerator.is_main_process:
a : List[str] = []
for task in tqdm(range(_A ) ):
a : int = human_eval['test'][task]['test']
a : int = f"""check({human_eval["test"][task]["entry_point"]})"""
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
a , a : Tuple = code_eval_metric.compute(
references=_A , predictions=_A , num_workers=args.num_workers )
print(f"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_A , _A )
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main() | 297 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 67 |
'''simple docstring'''
def xnor_gate(input_1: int, input_2: int) -> int:
    """simple docstring"""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    """simple docstring"""
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
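
# Equivalent bitwise formulation (a small sketch; assumes inputs are already 0/1):
def xnor_gate_bitwise(input_1: int, input_2: int) -> int:
    return 1 - (input_1 ^ input_2)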
| 67 | 1 |
import mpmath # for roots of unity
import numpy as np
class FFT:
    """simple docstring"""

    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)
        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    # Discrete fourier transform of A and B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    # Multiply the DFTs of A and B and find A*B
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2

        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    # Overwrite __str__ for print(); shows A, B and A*B
    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for coef, i in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for coef, i in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for coef, i in enumerate(self.product)
        )
        return f"{a}\n{b}\n{c}"
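
# Worked example (coefficients verified by hand; the class returns them as
# rounded complex numbers, so only the real parts are shown):
#   FFT([1, 2, 3], [3, 4]).product  ->  real parts [3, 10, 17, 12]
# i.e. (1 + 2x + 3x^2) * (3 + 4x) = 3 + 10x + 17x^2 + 12x^3.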
# Unit tests
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 312 |
def solution(n: int = 1000) -> int:
    """simple docstring"""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
| 312 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__UpperCamelCase : Optional[int] = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
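# For reference, HfArgumentParser maps each dataclass field above to a CLI flag
# of the same name, so a typical invocation looks like (illustrative only):
#   python run_multiple_choice.py --model_name_or_path bert-base-uncased \
#       --task_name swag --data_dir ./data --max_seq_length 128 \
#       --output_dir ./out --do_train --do_eval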
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 347 |
from copy import deepcopy
class FenwickTree:
    def __init__(self, arr: list[int] | None = None, size: int | None = None):
        """simple docstring"""
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]):
        """simple docstring"""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self):
        """simple docstring"""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int):
        """simple docstring"""
        return index + (index & (-index))

    @staticmethod
    def prev(index: int):
        """simple docstring"""
        return index - (index & (-index))

    def add(self, index: int, value: int):
        """simple docstring"""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int):
        """simple docstring"""
        self.add(index, value - self.get(index))

    def prefix(self, right: int):
        """simple docstring"""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int):
        """simple docstring"""
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int):
        """simple docstring"""
        return self.query(index, index + 1)

    def rank_query(self, value: int):
        """simple docstring"""
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
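    # Quick usage sketch (sums verified by hand; method names from the class above):
    f = FenwickTree(arr=[1, 2, 3, 4, 5])
    assert f.prefix(3) == 6  # arr[0] + arr[1] + arr[2]
    assert f.query(1, 4) == 9  # arr[1] + arr[2] + arr[3]
    f.add(2, 10)  # arr[2] += 10
    assert f.get(2) == 13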
| 347 | 1 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image of a different size than the one specified in the config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image of a different size than the one specified in the config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
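
# For reference, the sequence length used by the tester above follows the
# standard ViT patching rule with the tester defaults:
#   num_patches = (image_size // patch_size) ** 2 = (30 // 2) ** 2 = 225
#   seq_length = num_patches + 1 = 226 (the extra position is the [CLS] token)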
| 265 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
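
# A minimal sketch of real-world usage of `find_executable_batch_size` (the
# training function and dataloader helper below are placeholders, not part of
# this file):
#
#     @find_executable_batch_size(starting_batch_size=64)
#     def training_function(batch_size):
#         train_dataloader = build_dataloader(batch_size)  # hypothetical helper
#         ...  # a CUDA OOM raised here makes the decorator retry with batch_size // 2
#
#     training_function()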
| 265 | 1 |
"""simple docstring"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow('', '|', '|'),
    datarow=DataRow('', '|', '|'),
    padding=1,
    with_header_hide=None,
)

failed = []
group_info = []
no_error_payload = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
payload = [
    {
        'type': 'header',
        'text': {
            'type': 'plain_text',
            'text': F'''🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results''',
            'emoji': True,
        },
    }
]

total_num_failed = 0
for log in Path().glob('*.log'):
    section_num_failed = 0
    with open(log, 'r') as f:
        for line in f:
            line = json.loads(line)
            if line.get('nodeid', '') != "":
                test = line['nodeid']
                if line.get('duration', None) is not None:
                    duration = F'''{line["duration"]:.4f}'''
                    if line.get('outcome', '') == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split('_')[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ''
all_filesafailed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += F"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += F"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split('::')
                data[0] = data[0].split('/')[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)

            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
                table,
                headers=['Test Location', 'Num Failed'],
                tablefmt=hf_table_format,
                stralign='right',
            )
            message += F"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    if len(message) > 3_000:
        err = 'Too many failed tests, please see the full report in the Action results.'
        offset = len(err) + 10
        message = message[: 3_000 - offset] + F'''\n...\n```\n{err}'''
    print(F'''### {message}''')
else:
    message = 'No failed tests! 🤗'
    print(F'''## {message}''')
    payload.append(no_error_payload)

if os.environ.get('TEST_TYPE', '') != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ['SLACK_API_TOKEN'])
    if message != 'No failed tests! 🤗':
        md_report = {
            'type': 'section',
            'text': {
                'type': 'mrkdwn',
                'text': message,
            },
        }
        payload.append(md_report)
    action_button = {
        'type': 'section',
        'text': {
            'type': 'mrkdwn',
            'text': '*For more details:*',
        },
        'accessory': {
            'type': 'button',
            'text': {
                'type': 'plain_text',
                'text': 'Check Action results',
                'emoji': True,
            },
            'url': F'''https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
        },
    }
    payload.append(action_button)
    date_report = {
        'type': 'context',
        'elements': [
            {
                'type': 'plain_text',
                'text': F'''Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}''',
            }
        ],
    }
    payload.append(date_report)
    response = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
    ts = response.data['ts']
    for failed_file in all_filesafailed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ''
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ''
            payload = {
                'type': 'section',
                'text': {
                    'type': 'mrkdwn',
                    'text': F'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```''',
                },
            }
            client.chat_postMessage(
                channel='#accelerate-ci-daily',
                thread_ts=ts,
                blocks=[payload],
            )
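
# For reference, the custom TableFormat above renders pipe-delimited rows, so
#   tabulate([["test_foo.py", 2]], headers=["Test Location", "Num Failed"], tablefmt=hf_table_format)
# produces roughly (exact padding may differ):
#   | Test Location | Num Failed |
#   | test_foo.py   | 2          |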
| 271 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
a = logging.getLogger(__name__)
@dataclass
class InputExample:
    '''simple docstring'''

    guid: str
    words: List[str]
    labels: Optional[List[str]]
@dataclass
class InputFeatures:
    '''simple docstring'''

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None
class Split(Enum):
    '''simple docstring'''

    train = 'train'
    dev = 'dev'
    test = 'test'
class TokenClassificationTask:
    '''simple docstring'''

    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError
@staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info('Writing example %d of %d', ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info('*** Example ***')
                logger.info('guid: %s', example.guid)
                logger.info('tokens: %s', ' '.join([str(x) for x in tokens]))
                logger.info('input_ids: %s', ' '.join([str(x) for x in input_ids]))
                logger.info('input_mask: %s', ' '.join([str(x) for x in input_mask]))
                logger.info('segment_ids: %s', ' '.join([str(x) for x in segment_ids]))
                logger.info('label_ids: %s', ' '.join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids))
        return features
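# To make the first-subtoken labeling above concrete (toy values; the
# tokenization shown is assumed, not taken from a real tokenizer):
#   words = ["HuggingFace", "rocks"], labels = ["B-ORG", "O"]
#   tokenizer.tokenize("HuggingFace") -> ["hugging", "##face"]
# yields tokens    = ["hugging", "##face", "rocks"]
#        label_ids = [label_map["B-ORG"], pad_token_label_id, label_map["O"]]
# so only the first sub-token of each word carries a real label id.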
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        '''simple docstring'''

        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index

        def __init__(self, token_classification_task: TokenClassificationTask, data_dir: str, tokenizer: PreTrainedTokenizer, labels: List[str], model_type: str, max_seq_length: Optional[int] = None, overwrite_cache=False, mode: Split = Split.train, ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir, 'cached_{}_{}_{}'.format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)), )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + '.lock'
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(F'''Loading features from cached file {cached_features_file}''')
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(F'''Creating features from dataset file at {data_dir}''')
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ['xlnet']), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ['xlnet'] else 0, sep_token=tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == 'left'), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id, )
                    logger.info(F'''Saving features into cached file {cached_features_file}''')
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        '''simple docstring'''

        features: List[InputFeatures]
        pad_token_label_id: int = -100

        def __init__(self, token_classification_task: TokenClassificationTask, data_dir: str, tokenizer: PreTrainedTokenizer, labels: List[str], model_type: str, max_seq_length: Optional[int] = None, overwrite_cache=False, mode: Split = Split.train, ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ['xlnet']), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ['xlnet'] else 0, sep_token=tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == 'left'), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id, )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            # dtypes below follow the usual HF convention (int32 inputs, int64 labels);
            # the exact widths were garbled in the source, so this is a best guess
            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen, ({'input_ids': tf.int32, 'attention_mask': tf.int32}, tf.int64), (
                        {'input_ids': tf.TensorShape([None]), 'attention_mask': tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ), )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen, ({'input_ids': tf.int32, 'attention_mask': tf.int32, 'token_type_ids': tf.int32}, tf.int64), (
                        {
                            'input_ids': tf.TensorShape([None]),
                            'attention_mask': tf.TensorShape([None]),
                            'token_type_ids': tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ), )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
| 271 | 1 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    '''simple docstring'''
    embed = []
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
f"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
f"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
f"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
f"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def attention(idx, cnt):
    '''simple docstring'''
    attention_weights = []
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token(idx):
    '''simple docstring'''
    token = []
token.append((f"""cvt.encoder.stages.{idx}.cls_token""", "stage2.cls_token") )
return token
def final():
    '''simple docstring'''
    head = []
head.append(("layernorm.weight", "norm.weight") )
head.append(("layernorm.bias", "norm.bias") )
head.append(("classifier.weight", "head.weight") )
head.append(("classifier.bias", "head.bias") )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    '''simple docstring'''
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1_0_0_0
    repo_id = "huggingface/label-files"

    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 1_0]

    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 1_6]

    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 2_0]
        config.num_heads = [3, 1_2, 1_6]
        config.embed_dim = [1_9_2, 7_6_8, 1_0_2_4]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size  # attribute name as used by HF image processors
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=r'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
help='''Path to the original CvT checkpoint file.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
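    # The conversion above is a pure key rename: each helper returns
    # (huggingface_key, original_key) pairs, and convert_cvt_checkpoint copies
    # original_weights[original_key] into a fresh OrderedDict under
    # huggingface_key before calling load_state_dict.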
| 108 |
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    '''simple docstring'''
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length


if __name__ == "__main__":

    def f(x):
        '''simple docstring'''
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 10_00_00:
        print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
        i *= 10
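
    # Sanity check (a minimal sketch): the straight line y = x from 0 to 1 has
    # exact length sqrt(2), and the piecewise-linear approximation is exact here.
    assert abs(line_length(lambda x: x, 0, 1) - math.sqrt(2)) < 1e-9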
| 77 | 0 |
def join(separator: str, separated: list) -> str:
    '''simple docstring'''
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
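
# Worked example (verified by hand): join("#", ["a", "b", "c"]) returns "a#b#c",
# because the trailing "#" added in the loop is removed by the final strip().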
if __name__ == "__main__":
from doctest import testmod
testmod()
| 143 | from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
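# The tester above builds a tiny random DistilBERT config; the suite below runs the
# shared TF model-tester checks against each task head.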
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 143 | 1 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(model, model_args: tuple, output_path: Path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False):
    '''simple docstring'''
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset)
    else:
        export(model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset)
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    '''simple docstring'''
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)
    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder, model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ), output_path=output_path / "vae_decoder" / "model.onnx", ordered_input_names=["latent_sample", "return_dict"], output_names=["sample"], dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        }, opset=opset, )
del vae_decoder
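# Usage sketch (the script file name below is illustrative, not part of this module):
#   python convert_vae_decoder.py --model_path ./stable-diffusion-v1-5 --output_path ./sd-onnx --opset 14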
if __name__ == "__main__":
_UpperCamelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
_UpperCamelCase : Any = parser.parse_args()
print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print("SD: Done: ONNX")
| 77 |
'''simple docstring'''
def __lowerCAmelCase (density , bulk_modulus ):
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 234 | 0 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
__A =logging.getLogger(__name__)
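# Utilities for the distillation scripts: dump git metadata, initialize (multi-)GPU
# distributed state from environment variables, and seed all RNGs.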
def git_log(folder_path: str):
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }
    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
return
assert torch.cuda.is_available()
logger.info("""Initializing GPUs""" )
if params.n_gpu > 1:
assert params.local_rank != -1
        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])
        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
assert params.n_nodes == int(os.environ["""N_NODES"""] )
assert params.node_id == int(os.environ["""NODE_RANK"""] )
# local job (single GPU)
else:
        assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
# summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + """Number of nodes: %i""" % params.n_nodes )
logger.info(PREFIX + """Node ID : %i""" % params.node_id )
logger.info(PREFIX + """Local rank : %i""" % params.local_rank )
logger.info(PREFIX + """World size : %i""" % params.world_size )
logger.info(PREFIX + """GPUs per node : %i""" % params.n_gpu_per_node )
logger.info(PREFIX + """Master : %s""" % str(params.is_master ) )
logger.info(PREFIX + """Multi-node : %s""" % str(params.multi_node ) )
logger.info(PREFIX + """Multi-GPU : %s""" % str(params.multi_gpu ) )
logger.info(PREFIX + """Hostname : %s""" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("""Initializing PyTorch distributed""" )
torch.distributed.init_process_group(
init_method="""env://""" , backend="""nccl""" , )
def set_seed(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed ) | 360 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids
        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()
        EXPECTED_SCORE = -21.228168
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4) | 283 | 0 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
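# Integration test: DiT fine-tuned on RVL-CDIP (16 document classes).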
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
    def test_inference_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)
        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")
        image = dataset["train"][0]["image"].convert("RGB")
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347], device=torch_device, dtype=torch.float, )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 24 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
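# Lazy-import layout: `_import_structure` maps submodules to exported names so the heavy
# torch/TF/Flax backends are only imported when an attribute is first accessed.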
_import_structure = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_opt'] = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_opt'] = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_opt'] = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 8 | 0 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments :
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be trained."} )
    save_dir: Optional[str] = field(
        default="./" , metadata={"help": "Save dir where model repo is cloned and models updates are saved to."} )
    dataset_name_train: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path of training dataset."} )
    dataset_name_valid: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
    train_batch_size: Optional[int] = field(default=2 , metadata={"help": "Batch size for training."} )
    valid_batch_size: Optional[int] = field(default=2 , metadata={"help": "Batch size for evaluation."} )
    weight_decay: Optional[float] = field(default=0.1 , metadata={"help": "Value of weight decay."} )
    shuffle_buffer: Optional[int] = field(
        default=10_000 , metadata={"help": "Size of buffer used to shuffle streaming dataset."} )
    learning_rate: Optional[float] = field(default=2e-4 , metadata={"help": "Learning rate for training."} )
    lr_scheduler_type: Optional[str] = field(default="cosine" , metadata={"help": "Learning rate schedule for training."} )
    num_warmup_steps: Optional[int] = field(
        default=750 , metadata={"help": "Number of warmup steps in the learning rate schedule."} )
    gradient_accumulation_steps: Optional[int] = field(
        default=16 , metadata={"help": "Number of gradient accumulation steps."} )
    gradient_checkpointing: Optional[bool] = field(
        default=True , metadata={"help": "Use gradient checkpointing to reduce memory footprint."} )
    max_train_steps: Optional[int] = field(default=50_000 , metadata={"help": "Maximum number of training steps."} )
    max_eval_steps: Optional[int] = field(
        default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
    seq_length: Optional[int] = field(default=1_024 , metadata={"help": "Sequence lengths used for training."} )
    seed: Optional[int] = field(default=1 , metadata={"help": "Training seed."} )
    save_checkpoint_steps: Optional[int] = field(
        default=1_024 , metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."} , )
    resume_from_checkpoint: Optional[str] = field(
        default=None , metadata={"help": "States path if the training should continue from a checkpoint folder."} )
    tokenized: Optional[bool] = field(default=False , metadata={"help": "If True the data is pretokenized."} )
@dataclass
class EvaluationArguments :
__lowercase : Optional[str] = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
__lowercase : Optional[str] = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
__lowercase : Optional[int] = field(default=2 , metadata={"help": "Batch size used for evaluation."} )
__lowercase : Optional[int] = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
__lowercase : Optional[int] = field(default=1_024 , metadata={"help": "Length of sequences to be evaluated."} )
__lowercase : Optional[int] = field(default=1 , metadata={"help": "Random seed used for evaluation."} )
@dataclass
class HumanEvalArguments :
__lowercase : Optional[str] = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
__lowercase : Optional[int] = field(default=_SCREAMING_SNAKE_CASE , metadata={"help": "Number of workers used for code evaluation."} )
__lowercase : Optional[int] = field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."} , )
__lowercase : Optional[bool] = field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "Sample from the language model's output distribution."} )
__lowercase : Optional[float] = field(default=0.2 , metadata={"help": "Sampling temperature used for generation."} )
__lowercase : Optional[int] = field(default=256 , metadata={"help": "Maximum number of newly generated tokens."} )
__lowercase : Optional[int] = field(default=0 , metadata={"help": "Top-k parameter used for generation."} )
__lowercase : Optional[float] = field(default=0.9_5 , metadata={"help": "Top-p parameter used for nucleus sampling."} )
__lowercase : Optional[int] = field(default=10 , metadata={"help": "Number of generations to run in parallel."} )
__lowercase : Optional[int] = field(
default=200 , metadata={"help": "Number of completions to generate for each sample."} )
__lowercase : Optional[int] = field(default=1 , metadata={"help": "Random seed used for evaluation."} )
    __lowercase : Optional[str] = field(
        default="eval_results.json" , metadata={"help": "Name of the file where the evaluation results are saved."} )
__lowercase : Optional[str] = field(
default="0" , metadata={"help": "Allow `code_eval` to execute Python code on machine"} )
__lowercase : Optional[int] = field(
default=-1 , metadata={
"help": (
"Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
" number corresponds to which GPU device id to run on."
)
} , )
@dataclass
class PreprocessingArguments :
__lowercase : Optional[int] = field(
default=_SCREAMING_SNAKE_CASE , metadata={
"help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
} , )
__lowercase : Optional[str] = field(
default="transformersbook/codeparrot" , metadata={"help": "Folder or name of dataset to process."} )
__lowercase : Optional[str] = field(
default="codeparrot-clean" , metadata={"help": "Folder to save processed processed dataset."} )
__lowercase : Optional[int] = field(
default=100_000 , metadata={"help": "Number of files to save per JSON output file."} )
__lowercase : Optional[str] = field(default="content" , metadata={"help": "Column containing text data to process."} )
__lowercase : Optional[float] = field(
default=1_000 , metadata={"help": "Maximum line length in file, otherwise file is filtered."} )
__lowercase : Optional[float] = field(
default=100 , metadata={"help": "Maximum mean line length in file, otherwise file is filtered."} )
__lowercase : Optional[float] = field(
default=0.2_5 , metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."} )
__lowercase : Optional[float] = field(
default=1.5 , metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."} )
__lowercase : Optional[float] = field(
default=0.7 , metadata={"help": "Probability for filtering config, test and uncommon files."} )
__lowercase : Optional[str] = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} , )
__lowercase : Optional[bool] = field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "If True, near-duplicate samples are removed."} )
__lowercase : Optional[float] = field(
default=0.8_5 , metadata={"help": "Jaccard threshold for near-duplicate samples."} )
@dataclass
class TokenizerTrainingArguments :
__lowercase : Optional[str] = field(
default="gpt2" , metadata={"help": "Base tokenizer to build new tokenizer from."} )
__lowercase : Optional[str] = field(
default="transformersbook/codeparrot-train" , metadata={"help": "Dataset to train tokenizer on."} )
__lowercase : Optional[str] = field(default="content" , metadata={"help": "Column containing text data to process."} )
__lowercase : Optional[int] = field(default=200_000 , metadata={"help": "Number of examples to train tokenizer on."} )
    __lowercase : Optional[int] = field(
        default=32_768 , metadata={"help": "Target vocabulary size of the trained tokenizer."} )
__lowercase : Optional[str] = field(default="codeparrot" , metadata={"help": "Name of new tokenizer."} )
__lowercase : Optional[bool] = field(default=_SCREAMING_SNAKE_CASE , metadata={"help": "Push saved tokenizer to the hub."} )
@dataclass
class PretokenizationArguments :
__lowercase : Optional[str] = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} )
__lowercase : Optional[str] = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path to the dataset to pretokenize."} )
__lowercase : Optional[str] = field(
default="tokenized-codeparrot-train" , metadata={"help": "Repo name of the pretokenized data."} )
__lowercase : Optional[int] = field(default=_SCREAMING_SNAKE_CASE , metadata={"help": "Number of workers used for code evaluation."} )
@dataclass
class InitializationArguments :
__lowercase : Optional[str] = field(
default="gpt2-large" , metadata={"help": "Configuration to use for model initialization."} )
__lowercase : Optional[str] = field(
default="codeparrot/codeparrot" , metadata={"help": "Tokenizer attached to model."} )
__lowercase : Optional[str] = field(default="codeparrot" , metadata={"help": "Name of the created model."} )
__lowercase : Optional[bool] = field(default=_SCREAMING_SNAKE_CASE , metadata={"help": "Push saved tokenizer to the hub."} )
| 366 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 110 | 0 |
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
a_ : Union[str, Any] = "true"
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    '''simple docstring'''
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased')
    dataset = load_dataset('glue', 'mrpc', split='validation')

    def tokenize_function(examples):
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], )
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding='longest', return_tensors='pt')
        return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    '''simple docstring'''
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        'hf-internal-testing/mrpc-bert-base-cased', return_dict=True)
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    '''simple docstring'''
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    '''simple docstring'''
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f'Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(logits)}'
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    '''simple docstring'''
    metric = evaluate.load('glue', 'mrpc')
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup['no']
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch['labels'])
    baseline = metric.compute()
    # Then do distributed
    model, dataloader, device = setup['ddp']
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch['labels']
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f'Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'
def main():
    '''simple docstring'''
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print('**Testing gather_for_metrics**')
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`')
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test torch metrics**')
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99')
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test last batch is not dropped when perfectly divisible**')
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 168 |
'''simple docstring'''
def _A (lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :int ) -> float:
'''simple docstring'''
_a = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
def _A () -> Tuple:
'''simple docstring'''
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 168 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
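# Each parameterization below launches a single-node SageMaker training job and checks
# runtime, accuracy, and loss against the expected KPIs.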
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER","False" ) ) is not True,reason="Skipping test because should only be run when releasing minor transformers version",)
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_5_0, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_0_0, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    def setUp(self) -> None:
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True, )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        # creates estimator
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=f"{self.env.base_job_name}-single", instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path}, metric_definitions=self.env.metric_definitions, py_version="py36", )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        # create estimator
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 362 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
_snake_case = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
_snake_case = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts.")
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: "DPRReaderOutput",
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage, )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index : end_index + 1]), ))
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits,
        end_logits,
        max_answer_length,
        top_spans,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
| 343 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 15 |
class A__ :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case , __snake_case ):
snake_case = name
snake_case = value
snake_case = weight
def __repr__( self ):
return F'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''
    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight
def build_menu(name, value, weight):
    """simple docstring"""
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    """simple docstring"""
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
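# A minimal usage sketch (values illustrative):
#   food = build_menu(["Burger", "Pizza"], [80, 100], [40, 45])
#   taken, value = greedy(food, 60, Things.get_value)  # maximize value under a 60-unit weight cap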
def test_greedy():
    """simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 127 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
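# Image-to-3D Shap-E pipeline: a CLIP vision tower conditions a prior transformer that
# denoises a latent code, which the NeRF-style renderer then decodes into view frames.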
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """simple docstring"""

    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    """simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
super().__init__()
self.register_modules(
prior=_UpperCAmelCase , image_encoder=_UpperCAmelCase , image_processor=_UpperCAmelCase , scheduler=_UpperCAmelCase , renderer=_UpperCAmelCase , )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`')
        device = torch.device(f"cuda:{gpu_id}")
        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    def _execution_device(self):
        if self.device != torch.device('meta') or not hasattr(self.image_encoder, '_hf_hook'):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, '_hf_hook')
                and hasattr(module._hf_hook, 'execution_device')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)
        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors='pt').pixel_values[0].unsqueeze(0)
        image = image.to(dtype=self.image_encoder.dtype, device=device)
        image_embeds = self.image_encoder(image)['last_hidden_state']
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])
        return image_embeds
@torch.no_grad()
@replace_example_docstring(_UpperCAmelCase )
def __call__( self , _UpperCAmelCase , _UpperCAmelCase = 1 , _UpperCAmelCase = 25 , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = 4.0 , _UpperCAmelCase = 64 , _UpperCAmelCase = "pil" , _UpperCAmelCase = True , ):
if isinstance(_UpperCAmelCase , PIL.Image.Image ):
lowercase__: str = 1
elif isinstance(_UpperCAmelCase , torch.Tensor ):
lowercase__: int = image.shape[0]
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
lowercase__: int = len(_UpperCAmelCase )
else:
raise ValueError(
F"""`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_UpperCAmelCase )}""" )
lowercase__: Optional[Any] = self._execution_device
lowercase__: Dict = batch_size * num_images_per_prompt
lowercase__: str = guidance_scale > 1.0
lowercase__: List[Any] = self._encode_image(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# prior
self.scheduler.set_timesteps(_UpperCAmelCase , device=_UpperCAmelCase )
lowercase__: Union[str, Any] = self.scheduler.timesteps
lowercase__: List[Any] = self.prior.config.num_embeddings
lowercase__: List[str] = self.prior.config.embedding_dim
lowercase__: int = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
lowercase__: List[Any] = latents.reshape(latents.shape[0] , _UpperCAmelCase , _UpperCAmelCase )
for i, t in enumerate(self.progress_bar(_UpperCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
lowercase__: str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__: Optional[int] = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Any = self.prior(
_UpperCAmelCase , timestep=_UpperCAmelCase , proj_embedding=_UpperCAmelCase , ).predicted_image_embedding
# remove the variance
lowercase__, lowercase__: Optional[int] = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance:
lowercase__, lowercase__: Optional[int] = noise_pred.chunk(2 )
lowercase__: Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowercase__: Optional[int] = self.scheduler.step(
_UpperCAmelCase , timestep=_UpperCAmelCase , sample=_UpperCAmelCase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=_UpperCAmelCase )
lowercase__: Any = []
for i, latent in enumerate(_UpperCAmelCase ):
lowercase__: Any = self.renderer.decode(
latent[None, :] , _UpperCAmelCase , size=_UpperCAmelCase , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(_UpperCAmelCase )
lowercase__: Tuple = torch.stack(_UpperCAmelCase )
if output_type not in ["np", "pil"]:
raise ValueError(F"""Only the output types `pil` and `np` are supported, not output_type={output_type}""" )
lowercase__: Tuple = images.cpu().numpy()
if output_type == "pil":
lowercase__: List[Any] = [self.numpy_to_pil(_UpperCAmelCase ) for image in images]
# Offload last model to CPU
if hasattr(self , '''final_offload_hook''' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=_UpperCAmelCase )
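# A minimal, self-contained sketch (illustrative shapes only, an assumption) of the
# classifier-free-guidance recombination used in the denoising loop above: the batch
# is doubled (unconditional + conditional), the prediction is split back into halves,
# and the halves are blended by `guidance_scale`.
import torch

def cfg_combine(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # split the CFG-doubled batch into (unconditional, conditional) halves
    noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
    # steer the prediction away from the unconditional direction
    return noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)

print(cfg_combine(torch.randn(4, 16, 32), guidance_scale=3.0).shape)  # torch.Size([2, 16, 32])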
| 2 | """simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Any = "unispeech-sat"
def __init__( self , _UpperCAmelCase=32 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-5 , _UpperCAmelCase="group" , _UpperCAmelCase="gelu" , _UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) , _UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , _UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , _UpperCAmelCase=False , _UpperCAmelCase=128 , _UpperCAmelCase=16 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=0.05 , _UpperCAmelCase=10 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=10 , _UpperCAmelCase=0 , _UpperCAmelCase=320 , _UpperCAmelCase=2 , _UpperCAmelCase=0.1 , _UpperCAmelCase=100 , _UpperCAmelCase=256 , _UpperCAmelCase=256 , _UpperCAmelCase=0.1 , _UpperCAmelCase="mean" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=256 , _UpperCAmelCase=(512, 512, 512, 512, 1500) , _UpperCAmelCase=(5, 3, 3, 1, 1) , _UpperCAmelCase=(1, 2, 3, 1, 1) , _UpperCAmelCase=512 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=504 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase )
lowercase__: Union[str, Any] = hidden_size
lowercase__: Union[str, Any] = feat_extract_norm
lowercase__: Any = feat_extract_activation
lowercase__: List[Any] = list(_UpperCAmelCase )
lowercase__: Optional[int] = list(_UpperCAmelCase )
lowercase__: int = list(_UpperCAmelCase )
lowercase__: Any = conv_bias
lowercase__: List[str] = num_conv_pos_embeddings
lowercase__: List[str] = num_conv_pos_embedding_groups
lowercase__: int = len(self.conv_dim )
lowercase__: Dict = num_hidden_layers
lowercase__: List[Any] = intermediate_size
lowercase__: Dict = hidden_act
lowercase__: Optional[Any] = num_attention_heads
lowercase__: Union[str, Any] = hidden_dropout
lowercase__: List[Any] = attention_dropout
lowercase__: str = activation_dropout
lowercase__: Optional[Any] = feat_proj_dropout
lowercase__: Optional[int] = final_dropout
lowercase__: Any = layerdrop
lowercase__: int = layer_norm_eps
lowercase__: Any = initializer_range
lowercase__: Union[str, Any] = vocab_size
lowercase__: Optional[Any] = num_clusters
lowercase__: Dict = do_stable_layer_norm
lowercase__: List[str] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase__: Dict = apply_spec_augment
lowercase__: Union[str, Any] = mask_time_prob
lowercase__: List[str] = mask_time_length
lowercase__: Union[str, Any] = mask_time_min_masks
lowercase__: str = mask_feature_prob
lowercase__: Dict = mask_feature_length
lowercase__: List[Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowercase__: Tuple = num_codevectors_per_group
lowercase__: Optional[Any] = num_codevector_groups
lowercase__: int = contrastive_logits_temperature
lowercase__: Any = feat_quantizer_dropout
lowercase__: int = num_negatives
lowercase__: Optional[Any] = codevector_dim
lowercase__: int = proj_codevector_dim
lowercase__: str = diversity_loss_weight
# ctc loss
lowercase__: int = ctc_loss_reduction
lowercase__: Union[str, Any] = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowercase__: Optional[Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowercase__: Union[str, Any] = list(_UpperCAmelCase )
lowercase__: Tuple = list(_UpperCAmelCase )
lowercase__: Union[str, Any] = list(_UpperCAmelCase )
lowercase__: Tuple = xvector_output_dim
@property
def _snake_case ( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
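# Hedged usage sketch: with the defaults above, the seven feature-extractor strides
# (5, 2, 2, 2, 2, 2, 2) multiply to 320 input samples per output frame, which is what
# the `@property` computes. The public names below (`UniSpeechSatConfig`,
# `inputs_to_logits_ratio`) are assumptions about what this obfuscated snippet
# corresponds to in `transformers`.
from transformers import UniSpeechSatConfig

config = UniSpeechSatConfig()
print(config.num_feat_extract_layers)  # 7
print(config.inputs_to_logits_ratio)   # 320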
| 2 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : Dict =DebertaTokenizer
lowerCamelCase : Optional[Any] =True
lowerCamelCase : List[Any] =DebertaTokenizerFast
def __a ( self ) -> Optional[int]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a : Optional[int] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"[UNK]",
]
a : str = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
a : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
a : Dict = {"unk_token": "[UNK]"}
a : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
a : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCAmelCase__ ) )
def __a ( self , **lowerCAmelCase__ ) -> str:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ ) -> List[Any]:
a : Dict = "lower newer"
a : Dict = "lower newer"
return input_text, output_text
def __a ( self ) -> List[Any]:
a : str = self.get_tokenizer()
a : str = "lower newer"
a : Union[str, Any] = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
a : Optional[int] = tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
a : Tuple = tokens + [tokenizer.unk_token]
a : Union[str, Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ )
def __a ( self ) -> List[Any]:
a : List[Any] = self.get_tokenizer()
a : Optional[Any] = tokenizer("Hello" , "World" )
a : List[Any] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd["token_type_ids"] , lowerCAmelCase__ )
@slow
def __a ( self ) -> Tuple:
a : Tuple = self.tokenizer_class.from_pretrained("microsoft/deberta-base" )
a : Dict = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase__ )
a : str = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase__ )
a : Dict = tokenizer.encode(
"sequence builders" , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
a : Optional[int] = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
a : str = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ )
a : List[str] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def __a ( self ) -> str:
a : str = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
a : int = tokenizer_class.from_pretrained("microsoft/deberta-base" )
a : Optional[int] = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
a : Dict = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ )
a : Optional[Any] = [tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) for seq in encoding["input_ids"]]
# fmt: off
a : Optional[int] = {
"input_ids": [
[1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2]
],
"token_type_ids": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
a : Union[str, Any] = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
self.assertDictEqual(encoding.data , lowerCAmelCase__ )
for expected, decoded in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
| 105 |
"""simple docstring"""
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __UpperCamelCase ( a__ , a__ ):
@register_to_config
def __init__( self , *,
lowerCAmelCase__ = 4 , lowerCAmelCase__ = 768 , lowerCAmelCase__ , lowerCAmelCase__ , ) -> Optional[Any]:
super().__init__()
a : Tuple = nn.Parameter(torch.zeros(lowerCAmelCase__ ) )
# parameters for additional clip time embeddings
a : str = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ )
a : Any = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ )
# parameters for encoder hidden states
a : int = clip_extra_context_tokens
a : int = nn.Linear(
lowerCAmelCase__ , self.clip_extra_context_tokens * cross_attention_dim )
a : Any = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ )
a : str = nn.LayerNorm(lowerCAmelCase__ )
def __a ( self , *, lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
a : str = image_embeddings.shape[0]
a : Optional[int] = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
a : Any = classifier_free_guidance_embeddings.expand(
lowerCAmelCase__ , -1 )
a : Any = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
a : List[str] = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
a : Dict = self.embedding_proj(lowerCAmelCase__ )
a : List[str] = self.clip_image_embeddings_project_to_time_embeddings(lowerCAmelCase__ )
a : Dict = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
a : Union[str, Any] = self.clip_extra_context_tokens_proj(lowerCAmelCase__ )
a : List[str] = clip_extra_context_tokens.reshape(lowerCAmelCase__ , -1 , self.clip_extra_context_tokens )
a : Optional[Any] = clip_extra_context_tokens.permute(0 , 2 , 1 )
a : Optional[int] = self.encoder_hidden_states_proj(lowerCAmelCase__ )
a : str = self.text_encoder_hidden_states_norm(lowerCAmelCase__ )
a : List[str] = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
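# Hedged sketch of the extra-context-token reshape in the forward pass above (dims
# are the illustrative defaults: 4 extra tokens, cross_attention_dim=768): a flat
# (batch, 4*768) projection is reshaped and permuted into 4 sequence tokens of width
# 768 before being concatenated in front of the text encoder hidden states.
import torch

batch, n_tokens, dim = 2, 4, 768
flat = torch.randn(batch, n_tokens * dim)
tokens = flat.reshape(batch, -1, n_tokens).permute(0, 2, 1)
print(tokens.shape)  # torch.Size([2, 4, 768])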
| 105 | 1 |
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class __UpperCAmelCase :
pass
| 351 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : str = logging.get_logger(__name__)
_UpperCAmelCase : int = {
"""google/canine-s""": """https://huggingface.co/google/canine-s/resolve/main/config.json""",
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class lowercase ( lowercase_ ):
__SCREAMING_SNAKE_CASE : int = '''canine'''
def __init__( self , snake_case=768 , snake_case=12 , snake_case=12 , snake_case=3072 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=1_6384 , snake_case=16 , snake_case=0.02 , snake_case=1e-1_2 , snake_case=0 , snake_case=0xE000 , snake_case=0xE001 , snake_case=4 , snake_case=4 , snake_case=8 , snake_case=1_6384 , snake_case=128 , **snake_case , ):
super().__init__(pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , **snake_case )
snake_case_ = max_position_embeddings
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = type_vocab_size
snake_case_ = layer_norm_eps
# Character config:
snake_case_ = downsampling_rate
snake_case_ = upsampling_kernel_size
snake_case_ = num_hash_functions
snake_case_ = num_hash_buckets
snake_case_ = local_transformer_stride
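# Hedged sketch of the character-hash embedding the config above parameterizes:
# CANINE embeds raw Unicode code points by hashing each one into `num_hash_buckets`
# ids under several hash functions. The toy hash below mirrors the multiplicative
# form used by the model, `(id + 1) * prime mod buckets`; the prime seed here is an
# illustrative assumption.
def hash_bucket(codepoint: int, prime: int = 31, num_buckets: int = 16384) -> int:
    return ((codepoint + 1) * prime) % num_buckets

print(hash_bucket(ord("a")))  # one of the 16384 bucket ids for 'a'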
| 200 | 0 |
def UpperCAmelCase__ ( _A : int ):
'''simple docstring'''
a__ =0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def UpperCAmelCase__ ( _A : int = 1_00 ):
'''simple docstring'''
a__ =1
a__ =2
for i in range(2 , max_n + 1 ):
a__ =pre_numerator
a__ =2 * i // 3 if i % 3 == 0 else 1
a__ =cur_numerator
a__ =e_cont * pre_numerator + temp
return sum_digits(_A )
if __name__ == "__main__":
print(f"""{solution() = }""")
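# Hedged explanation and standalone cross-check: the continued fraction of e is
# [2; 1, 2, 1, 1, 4, 1, 1, 6, ...], so in the 1-indexed loop above every third
# partial quotient is 2*i//3 and the rest are 1; numerators then follow the standard
# convergent recurrence n_i = a_i * n_{i-1} + n_{i-2}. The 10th convergent is
# 1457/536, whose numerator digit sum is 17 (the Project Euler 65 example value).
def euler65(max_n: int = 100) -> int:
    pre, cur = 1, 2  # numerators of the 0th and 1st convergents
    for i in range(2, max_n + 1):
        a_i = 2 * i // 3 if i % 3 == 0 else 1
        pre, cur = cur, a_i * cur + pre
    return sum(int(d) for d in str(cur))

assert euler65(10) == 17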
| 188 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
def UpperCAmelCase__ ( _A : List[Any] ):
'''simple docstring'''
if "resnet-50" in model_name:
a__ =ResNetConfig.from_pretrained('''microsoft/resnet-50''' )
elif "resnet-101" in model_name:
a__ =ResNetConfig.from_pretrained('''microsoft/resnet-101''' )
else:
raise ValueError('''Model name should include either resnet50 or resnet101''' )
a__ =DetrConfig(use_timm_backbone=_A , backbone_config=_A )
# set label attributes
a__ ='''panoptic''' in model_name
if is_panoptic:
a__ =2_50
else:
a__ =91
a__ ='''huggingface/label-files'''
a__ ='''coco-detection-id2label.json'''
a__ =json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) )
a__ ={int(_A ): v for k, v in idalabel.items()}
a__ =idalabel
a__ ={v: k for k, v in idalabel.items()}
return config, is_panoptic
def UpperCAmelCase__ ( _A : Optional[int] ):
'''simple docstring'''
a__ =[]
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') )
rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') )
rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') )
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') )
rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""",
F"""encoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias""") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""",
F"""decoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
) )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
) )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias""") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
] )
return rename_keys
def UpperCAmelCase__ ( _A : str , _A : Union[str, Any] , _A : int ):
'''simple docstring'''
a__ =state_dict.pop(_A )
a__ =val
def UpperCAmelCase__ ( _A : Dict , _A : str=False ):
'''simple docstring'''
a__ =''''''
if is_panoptic:
a__ ='''detr.'''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
a__ =state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
a__ =state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
a__ =in_proj_weight[:2_56, :]
a__ =in_proj_bias[:2_56]
a__ =in_proj_weight[2_56:5_12, :]
a__ =in_proj_bias[2_56:5_12]
a__ =in_proj_weight[-2_56:, :]
a__ =in_proj_bias[-2_56:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
a__ =state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
a__ =state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
a__ =in_proj_weight[:2_56, :]
a__ =in_proj_bias[:2_56]
a__ =in_proj_weight[2_56:5_12, :]
a__ =in_proj_bias[2_56:5_12]
a__ =in_proj_weight[-2_56:, :]
a__ =in_proj_bias[-2_56:]
# read in weights + bias of input projection layer of cross-attention
a__ =state_dict.pop(
F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" )
a__ =state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
a__ =in_proj_weight_cross_attn[:2_56, :]
a__ =in_proj_bias_cross_attn[:2_56]
a__ =in_proj_weight_cross_attn[2_56:5_12, :]
a__ =in_proj_bias_cross_attn[2_56:5_12]
a__ =in_proj_weight_cross_attn[-2_56:, :]
a__ =in_proj_bias_cross_attn[-2_56:]
def UpperCAmelCase__ ( ):
'''simple docstring'''
a__ ='''http://images.cocodataset.org/val2017/000000039769.jpg'''
a__ =Image.open(requests.get(_A , stream=_A ).raw )
return im
@torch.no_grad()
def UpperCAmelCase__ ( _A : Dict , _A : Optional[Any]=None , _A : List[str]=False ):
'''simple docstring'''
a__, a__ =get_detr_config(_A )
# load original model from torch hub
a__ ={
'''detr-resnet-50''': '''detr_resnet50''',
'''detr-resnet-101''': '''detr_resnet101''',
}
logger.info(F"""Converting model {model_name}...""" )
a__ =torch.hub.load('''facebookresearch/detr''' , model_name_to_original_name[model_name] , pretrained=_A ).eval()
a__ =detr.state_dict()
# rename keys
for src, dest in create_rename_keys(_A ):
if is_panoptic:
a__ ='''detr.''' + src
rename_key(_A , _A , _A )
# query, key and value matrices need special treatment
read_in_q_k_v(_A , is_panoptic=_A )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
a__ ='''detr.model.''' if is_panoptic else '''model.'''
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('''detr''' )
and not key.startswith('''class_labels_classifier''' )
and not key.startswith('''bbox_predictor''' )
):
a__ =state_dict.pop(_A )
a__ =val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
a__ =state_dict.pop(_A )
a__ =val
elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
continue
else:
a__ =state_dict.pop(_A )
a__ =val
else:
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
a__ =state_dict.pop(_A )
a__ =val
# finally, create HuggingFace model and load state dict
a__ =DetrForSegmentation(_A ) if is_panoptic else DetrForObjectDetection(_A )
model.load_state_dict(_A )
model.eval()
# verify our conversion on an image
a__ ='''coco_panoptic''' if is_panoptic else '''coco_detection'''
a__ =DetrImageProcessor(format=_A )
a__ =processor(images=prepare_img() , return_tensors='''pt''' )
a__ =encoding['''pixel_values''']
a__ =detr(_A )
a__ =model(_A )
assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1E-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1E-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(_A ).mkdir(exist_ok=_A )
model.save_pretrained(_A )
processor.save_pretrained(_A )
if push_to_hub:
# Upload model and image processor to the hub
logger.info('''Uploading PyTorch model and image processor to the hub...''' )
model.push_to_hub(F"""nielsr/{model_name}""" )
processor.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''detr-resnet-50''',
type=str,
choices=['''detr-resnet-50''', '''detr-resnet-101'''],
help='''Name of the DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub or not.''')
lowerCamelCase = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
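# Hedged example invocation of the conversion script above (the script filename and
# output path below are placeholders, not taken from the source):
#
#   python convert_detr_checkpoint.py \
#       --model_name detr-resnet-50 \
#       --pytorch_dump_folder_path ./detr-resnet-50-hf
#
# Once converted, the checkpoint loads like any local Hugging Face model:
#
#   from transformers import DetrForObjectDetection, DetrImageProcessor
#   model = DetrForObjectDetection.from_pretrained("./detr-resnet-50-hf")
#   processor = DetrImageProcessor.from_pretrained("./detr-resnet-50-hf")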
| 188 | 1 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class UpperCAmelCase ( A_ ,unittest.TestCase ):
A__ : str = BarthezTokenizer
A__ : List[Any] = BarthezTokenizerFast
A__ : Optional[int] = True
A__ : Optional[int] = True
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Dict:
'''simple docstring'''
super().setUp()
snake_case : Tuple = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=lowerCAmelCase__ )
snake_case : List[str] = tokenizer
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Optional[int]:
'''simple docstring'''
snake_case : Tuple = "<pad>"
snake_case : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Optional[int]:
'''simple docstring'''
snake_case : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(lowerCAmelCase__ ) , 10_11_22 )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Dict:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_11_22 )
@require_torch
def _SCREAMING_SNAKE_CASE (self : Any ) -> int:
'''simple docstring'''
snake_case : int = ["A long paragraph for summarization.", "Another paragraph for summarization."]
snake_case : Optional[int] = [0, 57, 30_18, 7_03_07, 91, 2]
snake_case : int = self.tokenizer(
lowerCAmelCase__ , max_length=len(lowerCAmelCase__ ) , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors="pt" )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
snake_case : str = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE (self : str ) -> Optional[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
snake_case : Optional[int] = self.get_tokenizer()
snake_case : Optional[int] = self.get_rust_tokenizer()
snake_case : Tuple = "I was born in 92000, and this is falsé."
snake_case : Dict = tokenizer.tokenize(lowerCAmelCase__ )
snake_case : Union[str, Any] = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case : Dict = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
snake_case : Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case : Optional[Any] = self.get_rust_tokenizer()
snake_case : Optional[Any] = tokenizer.encode(lowerCAmelCase__ )
snake_case : Optional[int] = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def _SCREAMING_SNAKE_CASE (self : int ) -> int:
'''simple docstring'''
snake_case : Optional[Any] = {"input_ids": [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
snake_case : Tuple = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=lowerCAmelCase__ , )
| 366 |
from __future__ import annotations
__lowerCamelCase = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class UpperCAmelCase :
def __init__(self : Tuple , snake_case__ : dict[str, list[str]] , snake_case__ : str ) -> None:
'''simple docstring'''
snake_case : str = graph
# mapping node to its parent in resulting breadth first tree
snake_case : dict[str, str | None] = {}
snake_case : Union[str, Any] = source_vertex
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> None:
'''simple docstring'''
snake_case : Any = {self.source_vertex}
snake_case : str = None
snake_case : List[str] = [self.source_vertex] # first in first out queue
while queue:
snake_case : List[Any] = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(snake_case__ )
snake_case : Any = vertex
queue.append(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : str ) -> str:
'''simple docstring'''
if target_vertex == self.source_vertex:
return self.source_vertex
snake_case : str = self.parent.get(snake_case__ )
if target_vertex_parent is None:
snake_case : Optional[Any] = (
f"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
)
raise ValueError(snake_case__ )
return self.shortest_path(snake_case__ ) + f"""->{target_vertex}"""
if __name__ == "__main__":
__lowerCamelCase = Graph(graph, """G""")
g.breadth_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
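# Hedged worked trace: BFS from "G" records parent pointers level by level
# (G -> C -> {A, F} -> {B, E} -> D), so shortest_path("D") walks the parents
# backwards to produce "G->C->A->B->D". A standalone re-implementation of the same
# idea (the adjacency dict is copied inline so the sketch is runnable on its own):
from collections import deque

def bfs_path(graph: dict, source: str, target: str) -> list:
    parent = {source: None}  # maps each reached vertex to its BFS-tree parent
    queue = deque([source])
    while queue:
        vertex = queue.popleft()
        for neighbor in graph[vertex]:
            if neighbor not in parent:
                parent[neighbor] = vertex
                queue.append(neighbor)
    path, node = [], target
    while node is not None:
        path.append(node)
        node = parent[node]
    return path[::-1]

demo = {"A": ["B", "C", "E"], "B": ["A", "D", "E"], "C": ["A", "F", "G"], "D": ["B"], "E": ["A", "B", "D"], "F": ["C"], "G": ["C"]}
print(bfs_path(demo, "G", "D"))  # ['G', 'C', 'A', 'B', 'D']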
| 10 | 0 |
'''simple docstring'''
from __future__ import annotations
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> tuple[str, float]:
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif stress < 0:
raise ValueError('''Stress cannot be negative''' )
elif tangential_force < 0:
raise ValueError('''Tangential Force cannot be negative''' )
elif area < 0:
raise ValueError('''Area cannot be negative''' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
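# Hedged numeric check of the single relation the solver above dispatches on
# (shear stress tau = F / A; whichever quantity is passed as 0 is solved for):
force, area = 100.0, 25.0
stress = force / area                     # tau = 4.0
assert abs(stress * area - force) < 1e-9  # round trip: F = tau * A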
| 67 | '''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
class a__ ( UpperCAmelCase__ ):
def __init__( self : Optional[Any] , a : Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
"""simple docstring"""
super().__init__()
__lowerCamelCase = nn.ModuleList(a )
def SCREAMING_SNAKE_CASE__ ( self : Any , a : torch.FloatTensor , a : Union[torch.Tensor, float, int] , a : torch.Tensor , a : List[torch.tensor] , a : List[float] , a : Optional[torch.Tensor] = None , a : Optional[torch.Tensor] = None , a : Optional[torch.Tensor] = None , a : Optional[Dict[str, Any]] = None , a : bool = False , a : bool = True , ):
"""simple docstring"""
for i, (image, scale, controlnet) in enumerate(zip(a , a , self.nets ) ):
__lowerCamelCase , __lowerCamelCase = controlnet(
a , a , a , a , a , a , a , a , a , a , a , )
# merge samples
if i == 0:
__lowerCamelCase , __lowerCamelCase = down_samples, mid_sample
else:
__lowerCamelCase = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(a , a )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def SCREAMING_SNAKE_CASE__ ( self : Any , a : Union[str, os.PathLike] , a : bool = True , a : Callable = None , a : bool = False , a : Optional[str] = None , ):
"""simple docstring"""
__lowerCamelCase = 0
__lowerCamelCase = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
a , is_main_process=a , save_function=a , safe_serialization=a , variant=a , )
idx += 1
__lowerCamelCase = model_path_to_save + f"""_{idx}"""
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[str] , a : Optional[Union[str, os.PathLike]] , **a : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = 0
__lowerCamelCase = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
__lowerCamelCase = pretrained_model_path
while os.path.isdir(a ):
__lowerCamelCase = ControlNetModel.from_pretrained(a , **a )
controlnets.append(a )
idx += 1
__lowerCamelCase = pretrained_model_path + f"""_{idx}"""
logger.info(f"""{len(a )} controlnets loaded from {pretrained_model_path}.""" )
if len(a ) == 0:
raise ValueError(
f"""No ControlNets found under {os.path.dirname(a )}. Expected at least {pretrained_model_path + '_0'}.""" )
return cls(a )
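# Hedged usage sketch (model ids below are illustrative): because the class above
# simply runs each child ControlNet and sums the residuals, a plain list of
# ControlNets can be passed where a single one is accepted and gets wrapped.
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

controlnets = [
    ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny"),
    ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose"),
]
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnets
)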
| 67 | 1 |
from scipy.stats import pearsonr
import datasets
lowercase_ = "\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n"
lowercase_ = "\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results['pearsonr'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n ['p-value', 'pearsonr']\n >>> print(round(results['pearsonr'], 2))\n -0.74\n >>> print(round(results['p-value'], 2))\n 0.15\n"
lowercase_ = "\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _snake_case ( datasets.Metric):
def A__ ( self : str ):
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ), reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"], )
def A__ ( self : Union[str, Any], __lowercase : Tuple, __lowercase : Optional[int], __lowercase : Tuple=False ):
if return_pvalue:
lowercase__ = pearsonr(__lowercase, __lowercase )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(__lowercase, __lowercase )[0] )}
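# Usage sketch mirroring the metric's own docstring examples above:
import datasets

pearsonr_metric = datasets.load_metric("pearsonr")
results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
print(round(results["pearsonr"], 2))  # -0.74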
| 350 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""deepmind/language-perceiver""": """https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json""",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class _snake_case ( lowercase__):
UpperCamelCase__ : Any ="""perceiver"""
def __init__( self : Any, __lowercase : Optional[Any]=256, __lowercase : List[str]=1280, __lowercase : Dict=768, __lowercase : int=1, __lowercase : Dict=26, __lowercase : Any=8, __lowercase : List[Any]=8, __lowercase : Dict=None, __lowercase : List[Any]=None, __lowercase : str="kv", __lowercase : str=1, __lowercase : Optional[Any]=1, __lowercase : str="gelu", __lowercase : List[str]=0.1, __lowercase : int=0.02, __lowercase : Union[str, Any]=1e-1_2, __lowercase : Optional[Any]=True, __lowercase : Optional[Any]=262, __lowercase : str=2048, __lowercase : Optional[Any]=56, __lowercase : str=[368, 496], __lowercase : str=16, __lowercase : int=1920, __lowercase : Dict=16, __lowercase : List[Any]=[1, 16, 224, 224], **__lowercase : str, ):
super().__init__(**__lowercase )
lowercase__ = num_latents
lowercase__ = d_latents
lowercase__ = d_model
lowercase__ = num_blocks
lowercase__ = num_self_attends_per_block
lowercase__ = num_self_attention_heads
lowercase__ = num_cross_attention_heads
lowercase__ = qk_channels
lowercase__ = v_channels
lowercase__ = cross_attention_shape_for_attention
lowercase__ = self_attention_widening_factor
lowercase__ = cross_attention_widening_factor
lowercase__ = hidden_act
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = use_query_residual
# masked language modeling attributes
lowercase__ = vocab_size
lowercase__ = max_position_embeddings
# image classification attributes
lowercase__ = image_size
# flow attributes
lowercase__ = train_size
# multimodal autoencoding attributes
lowercase__ = num_frames
lowercase__ = audio_samples_per_frame
lowercase__ = samples_per_patch
lowercase__ = output_shape
class _snake_case ( lowercase__):
@property
def A__ ( self : Optional[int] ):
if self.task == "multiple-choice":
lowercase__ = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowercase__ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("inputs", dynamic_axis),
("attention_mask", dynamic_axis),
] )
@property
def A__ ( self : Optional[Any] ):
return 1e-4
def A__ ( self : Tuple, __lowercase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], __lowercase : int = -1, __lowercase : int = -1, __lowercase : int = -1, __lowercase : bool = False, __lowercase : Optional[TensorType] = None, __lowercase : int = 3, __lowercase : int = 40, __lowercase : int = 40, ):
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(__lowercase, __lowercase ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase__ = compute_effective_axis_dimension(
__lowercase, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase__ = preprocessor.num_special_tokens_to_add(__lowercase )
lowercase__ = compute_effective_axis_dimension(
__lowercase, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=__lowercase )
# Generate dummy inputs according to compute batch and sequence
lowercase__ = [" ".join(["a"] ) * seq_length] * batch_size
lowercase__ = dict(preprocessor(__lowercase, return_tensors=__lowercase ) )
lowercase__ = inputs.pop("input_ids" )
return inputs
elif isinstance(__lowercase, __lowercase ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase__ = compute_effective_axis_dimension(__lowercase, fixed_dimension=OnnxConfig.default_fixed_batch )
lowercase__ = self._generate_dummy_images(__lowercase, __lowercase, __lowercase, __lowercase )
lowercase__ = dict(preprocessor(images=__lowercase, return_tensors=__lowercase ) )
lowercase__ = inputs.pop("pixel_values" )
return inputs
else:
raise ValueError(
"Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
| 224 | 0 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__SCREAMING_SNAKE_CASE : List[str] = logging.getLogger(__name__)
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
return (preds == labels).mean()
@dataclass
class __A :
'''simple docstring'''
__lowercase: str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""})
__lowercase: Optional[str] = field(
default=snake_case__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""})
__lowercase: Optional[str] = field(
default=snake_case__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""})
__lowercase: Optional[str] = field(
default=snake_case__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class __A :
'''simple docstring'''
__lowercase: str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys())})
__lowercase: str = field(metadata={"""help""": """Should contain the data files for the task."""})
__lowercase: int = field(
default=1_28 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__lowercase: bool = field(
default=snake_case__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""})
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
                results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
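For orientation, the argument-parsing pattern in main() reduces to the following self-contained sketch (requires transformers; the toy dataclass, its fields, and the sample values are illustrative, not taken from this script):

from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class ToyArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on."})
    max_seq_length: int = field(default=128, metadata={"help": "Maximum total input sequence length."})


parser = HfArgumentParser(ToyArguments)
(toy_args,) = parser.parse_args_into_dataclasses(args=["--task_name", "swag"])
assert toy_args.task_name == "swag" and toy_args.max_seq_length == 128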
| 347 |
"""simple docstring"""
def is_number_palindrome(num: int) -> bool:
    # A negative number can never be a palindrome.
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        # Peel the last digit off num and append it to the reversal.
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
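A few direct checks of the digit-reversal logic above (illustrative values):

# Sanity checks for is_number_palindrome as defined above.
assert is_number_palindrome(121)
assert is_number_palindrome(0)
assert not is_number_palindrome(10)
assert not is_number_palindrome(-121)  # negatives are rejected up front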
| 347 | 1 |
'''simple docstring'''
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler


class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(
        self,
        value_function: UNet1DModel,
        unet: UNet1DModel,
        scheduler: DDPMScheduler,
        env,
    ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        # Per-key statistics of the offline dataset, used for (de)normalization.
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        # Overwrite the conditioned timesteps (the state channels) in the trajectory.
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad

                # no guidance on the final, most denoised timesteps
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)

            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
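The planner's normalize/de_normalize pair is a per-key affine map over dataset statistics. A standalone round-trip check with synthetic numbers (the statistics below are made up for illustration, not from any real dataset):

import numpy as np

# Synthetic per-key statistics standing in for env.get_dataset() moments.
means = {"observations": np.array([0.5, -1.0])}
stds = {"observations": np.array([2.0, 0.5])}

def normalize(x_in, key):
    return (x_in - means[key]) / stds[key]

def de_normalize(x_in, key):
    return x_in * stds[key] + means[key]

obs = np.array([1.5, -2.0])
assert np.allclose(de_normalize(normalize(obs, "observations"), "observations"), obs)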
| 358 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)


class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        # Window/hop lengths are given in milliseconds; convert them to samples.
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
        """Extract MFSC features for one (unbatched) waveform."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
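The frame geometry derived in __init__ can be checked by hand: 25 ms windows and 10 ms hops at 16 kHz give 400-sample windows and 160-sample strides, and the FFT length rounds up to a power of two. A standalone sketch; the local optimal_fft_length mirrors the imported helper under the assumption that it returns the next power of two:

sampling_rate, win_length_ms, hop_length_ms = 16000, 25, 10
sample_size = win_length_ms * sampling_rate // 1000    # 400 samples per window
sample_stride = hop_length_ms * sampling_rate // 1000  # 160 samples per hop

def optimal_fft_length(window_length: int) -> int:
    # Next power of two >= window_length (assumed behavior of the helper above).
    n = 1
    while n < window_length:
        n *= 2
    return n

assert (sample_size, sample_stride, optimal_fft_length(sample_size)) == (400, 160, 512)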
| 129 | 0 |
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """A graph vertex with a key, a parent pointer, and weighted edges."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        """Add a pointer to a vertex at the neighbors list."""
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        """Record the weight of the edge to a destination vertex."""
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors (vertices are passed in 1-indexed):
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm with a linear scan for the minimum-key vertex."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm using a binary heap for the frontier."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    """Doctests for prim() and prim_heap() go here."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
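An end-to-end run of both variants above on a weighted triangle; vertices are passed to connect 1-indexed, and both traversals keep the two cheapest edges:

# Triangle with edge weights 1, 2, 4; the MST keeps the weight-1 and weight-2 edges.
graph = [Vertex(n) for n in range(3)]
connect(graph, 1, 2, 1)
connect(graph, 2, 3, 2)
connect(graph, 1, 3, 4)
assert prim(graph, graph[0]) == [(2, 1), (3, 2)]
assert list(prim_heap(graph, graph[0])) == [(2, 1), (3, 2)]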
| 271 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
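The class above is a pure deprecation shim: warn, then defer everything to the replacement. The same pattern in standalone, runnable form (toy class names, not part of transformers):

import warnings

class NewProcessor:
    def __init__(self, size: int = 224) -> None:
        self.size = size

class OldExtractor(NewProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OldExtractor is deprecated. Please use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = OldExtractor(size=128)
assert extractor.size == 128
assert any(issubclass(w.category, FutureWarning) for w in caught)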
| 271 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__A =logging.get_logger(__name__)
__A ='''▁'''
__A ={'''vocab_file''': '''sentencepiece.bpe.model'''}
__A ={
'''vocab_file''': {
'''facebook/mbart-large-50-one-to-many-mmt''': (
'''https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'''
),
}
}
__A ={
'''facebook/mbart-large-50-one-to-many-mmt''': 1_0_2_4,
}
# fmt: off
__A =['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''', '''af_ZA''', '''az_AZ''', '''bn_IN''', '''fa_IR''', '''he_IL''', '''hr_HR''', '''id_ID''', '''ka_GE''', '''km_KH''', '''mk_MK''', '''ml_IN''', '''mn_MN''', '''mr_IN''', '''pl_PL''', '''ps_AF''', '''pt_XX''', '''sv_SE''', '''sw_KE''', '''ta_IN''', '''te_IN''', '''th_TH''', '''tl_XX''', '''uk_UA''', '''ur_PK''', '''xh_ZA''', '''gl_ES''', '''sl_SI''']
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = ['input_ids', 'attention_mask']
lowerCAmelCase__ = []
lowerCAmelCase__ = []
def __init__( self , lowercase , lowercase=None , lowercase=None , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase = None , **lowercase , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else mask_token
lowerCamelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
lowerCamelCase_ = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=lowercase , tgt_lang=lowercase , eos_token=lowercase , unk_token=lowercase , sep_token=lowercase , cls_token=lowercase , pad_token=lowercase , mask_token=lowercase , sp_model_kwargs=self.sp_model_kwargs , **lowercase , )
lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowercase ) )
lowerCamelCase_ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
lowerCamelCase_ = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCamelCase_ = 1
lowerCamelCase_ = len(self.sp_model )
lowerCamelCase_ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowercase )
}
lowerCamelCase_ = {v: k for k, v in self.lang_code_to_id.items()}
lowerCamelCase_ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
lowerCamelCase_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowerCamelCase_ = src_lang if src_lang is not None else "en_XX"
lowerCamelCase_ = self.lang_code_to_id[self._src_lang]
lowerCamelCase_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def SCREAMING_SNAKE_CASE_( self ) -> int:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def SCREAMING_SNAKE_CASE_( self ) -> str:
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
lowerCamelCase_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ) -> Dict:
lowerCamelCase_ = self.__dict__.copy()
lowerCamelCase_ = None
return state
def __setstate__( self , lowercase ) -> None:
lowerCamelCase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCamelCase_ = {}
lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = {self.convert_ids_to_tokens(lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> List[str]:
return self.sp_model.encode(lowercase , out_type=lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> int:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCamelCase_ = self.sp_model.PieceToId(lowercase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Any:
lowerCamelCase_ = []
lowerCamelCase_ = ""
lowerCamelCase_ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowercase ) + token
lowerCamelCase_ = True
lowerCamelCase_ = []
else:
current_sub_tokens.append(lowercase )
lowerCamelCase_ = False
out_string += self.sp_model.decode(lowercase )
return out_string.strip()
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> Tuple[str]:
if not os.path.isdir(lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCamelCase_ = os.path.join(
lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase , "wb" ) as fi:
lowerCamelCase_ = self.sp_model.serialized_model_proto()
fi.write(lowercase )
return (out_vocab_file,)
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None , lowercase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase )
lowerCamelCase_ = [1] * len(self.prefix_tokens )
lowerCamelCase_ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowercase )) + suffix_ones
return prefix_ones + ([0] * len(lowercase )) + ([0] * len(lowercase )) + suffix_ones
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , **lowercase ) -> List[Any]:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
lowerCamelCase_ = src_lang
lowerCamelCase_ = self(lowercase , add_special_tokens=lowercase , return_tensors=lowercase , **lowercase )
lowerCamelCase_ = self.convert_tokens_to_ids(lowercase )
lowerCamelCase_ = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = "en_XX" , lowercase = None , lowercase = "ro_RO" , **lowercase , ) -> BatchEncoding:
lowerCamelCase_ = src_lang
lowerCamelCase_ = tgt_lang
return super().prepare_seqaseq_batch(lowercase , lowercase , **lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
lowerCamelCase_ = self.lang_code_to_id[src_lang]
lowerCamelCase_ = [self.cur_lang_code_id]
lowerCamelCase_ = [self.eos_token_id]
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
lowerCamelCase_ = self.lang_code_to_id[tgt_lang]
lowerCamelCase_ = [self.cur_lang_code_id]
lowerCamelCase_ = [self.eos_token_id]
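The id-conversion methods above implement the fairseq/sentencepiece alignment spelled out in the comment table earlier in the class: four pinned special tokens plus a fixed offset of 1 for everything else. The mapping in isolation:

# Standalone model of the token-to-id fallback path.
fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1

def spm_id_to_fairseq_id(spm_id: int) -> int:
    # sentencepiece id 0 is <unk>; any other id shifts up by the offset.
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]

assert spm_id_to_fairseq_id(0) == 3  # unknown piece maps to fairseq <unk>
assert spm_id_to_fairseq_id(3) == 4  # "," is 3 in spm, 4 in fairseq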
| 369 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class _SCREAMING_SNAKE_CASE :
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=False , lowercase=True , lowercase=99 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.0_2 , lowercase=3 , lowercase=4 , lowercase=None , ) -> Optional[int]:
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_input_mask
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = scope
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
if self.use_token_type_ids:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Union[str, Any]:
lowerCamelCase_ = BioGptModel(config=lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , attention_mask=lowercase )
lowerCamelCase_ = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Any:
lowerCamelCase_ = BioGptForCausalLM(config=lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Tuple:
lowerCamelCase_ = BioGptModel(config=lowercase )
model.to(lowercase )
model.eval()
# create attention mask
lowerCamelCase_ = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase )
lowerCamelCase_ = self.seq_length // 2
lowerCamelCase_ = 0
# first forward pass
lowerCamelCase_ , lowerCamelCase_ = model(lowercase , attention_mask=lowercase ).to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCamelCase_ = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
lowerCamelCase_ = ids_tensor((1,) , lowercase ).item() + 1
lowerCamelCase_ = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
lowerCamelCase_ = random_other_next_tokens
# append to next input_ids and attn_mask
lowerCamelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCamelCase_ = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowercase )] , dim=1 , )
# get two different outputs
lowerCamelCase_ = model(lowercase , attention_mask=lowercase )["last_hidden_state"]
lowerCamelCase_ = model(lowercase , past_key_values=lowercase , attention_mask=lowercase )["last_hidden_state"]
# select random slice
lowerCamelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCamelCase_ = output_from_no_past[:, -1, random_slice_idx].detach()
lowerCamelCase_ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase , lowercase , atol=1e-3 ) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Dict:
lowerCamelCase_ = BioGptModel(config=lowercase ).to(lowercase ).eval()
lowerCamelCase_ = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase )
# first forward pass
lowerCamelCase_ = model(lowercase , attention_mask=lowercase , use_cache=lowercase )
lowerCamelCase_ , lowerCamelCase_ = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
lowerCamelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCamelCase_ = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
lowerCamelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCamelCase_ = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
lowerCamelCase_ = model(lowercase , attention_mask=lowercase )["last_hidden_state"]
lowerCamelCase_ = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[
"last_hidden_state"
]
# select random slice
lowerCamelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCamelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase , lowercase , atol=1e-3 ) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase , lowercase=False ) -> Optional[int]:
lowerCamelCase_ = BioGptForCausalLM(lowercase )
model.to(lowercase )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
lowerCamelCase_ = model(lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def SCREAMING_SNAKE_CASE_( self , lowercase , *lowercase ) -> List[Any]:
lowerCamelCase_ = BioGptModel(lowercase )
lowerCamelCase_ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_0_1 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.0_1 )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Any:
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = BioGptForTokenClassification(lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (BioGptForCausalLM,) if is_torch_available() else ()
lowerCAmelCase__ = (
{
'feature-extraction': BioGptModel,
'text-classification': BioGptForSequenceClassification,
'text-generation': BioGptForCausalLM,
'token-classification': BioGptForTokenClassification,
'zero-shot': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = BioGptModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=lowercase , hidden_size=37 )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase_ = type
self.model_tester.create_and_check_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*lowercase , gradient_checkpointing=lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> int:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*lowercase )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
model.to(lowercase )
lowerCamelCase_ = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
lowerCamelCase_ = "left"
# Define PAD Token = EOS Token = 50256
lowerCamelCase_ = tokenizer.eos_token
lowerCamelCase_ = model.config.eos_token_id
# use different length sentences to test batching
lowerCamelCase_ = [
"Hello, my dog is a little",
"Today, I",
]
lowerCamelCase_ = tokenizer(lowercase , return_tensors="pt" , padding=lowercase )
lowerCamelCase_ = inputs["input_ids"].to(lowercase )
lowerCamelCase_ = model.generate(
input_ids=lowercase , attention_mask=inputs["attention_mask"].to(lowercase ) , )
lowerCamelCase_ = tokenizer(sentences[0] , return_tensors="pt" ).input_ids.to(lowercase )
lowerCamelCase_ = model.generate(input_ids=lowercase )
lowerCamelCase_ = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
lowerCamelCase_ = tokenizer(sentences[1] , return_tensors="pt" ).input_ids.to(lowercase )
lowerCamelCase_ = model.generate(input_ids=lowercase , max_length=model.config.max_length - num_paddings )
lowerCamelCase_ = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
lowerCamelCase_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase )
lowerCamelCase_ = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase )
lowerCamelCase_ = [
"Hello, my dog is a little bit bigger than a little bit.",
"Today, I have a good idea of how to use the information",
]
self.assertListEqual(lowercase , lowercase )
self.assertListEqual(lowercase , [non_padded_sentence, padded_sentence] )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> int:
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = BioGptModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = 3
lowerCamelCase_ = input_dict["input_ids"]
lowerCamelCase_ = input_ids.ne(1 ).to(lowercase )
lowerCamelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCamelCase_ = BioGptForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = 3
lowerCamelCase_ = "multi_label_classification"
lowerCamelCase_ = input_dict["input_ids"]
lowerCamelCase_ = input_ids.ne(1 ).to(lowercase )
lowerCamelCase_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCamelCase_ = BioGptForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
lowerCamelCase_ = torch.tensor([[2, 4805, 9, 656, 21]] )
lowerCamelCase_ = model(lowercase )[0]
lowerCamelCase_ = 42384
lowerCamelCase_ = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , lowercase )
lowerCamelCase_ = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase , atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
lowerCamelCase_ = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
lowerCamelCase_ = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
model.to(lowercase )
torch.manual_seed(0 )
lowerCamelCase_ = tokenizer("COVID-19 is" , return_tensors="pt" ).to(lowercase )
lowerCamelCase_ = model.generate(
**lowercase , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=lowercase , )
lowerCamelCase_ = tokenizer.decode(output_ids[0] , skip_special_tokens=lowercase )
lowerCamelCase_ = (
"COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
" more than 800,000 deaths."
)
self.assertEqual(lowercase , lowercase )
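The batched-generation test above sets the padding side to "left" and reuses EOS as PAD; decoder-only models need the last position of every row to be a real token. The padding arithmetic in isolation (toy ids, no tokenizer):

# Left padding: pad ids and mask on the left so each row ends with real tokens.
eos = 2
batch = [[5, 6, 7, 8], [9, 10]]
width = max(len(row) for row in batch)
padded = [[eos] * (width - len(row)) + row for row in batch]
mask = [[0] * (width - len(row)) + [1] * len(row) for row in batch]
assert padded[1] == [eos, eos, 9, 10]
assert mask[1] == [0, 0, 1, 1]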
| 47 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __snake_case ( _lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = UnCLIPImageVariationPipeline
__lowerCamelCase = IMAGE_VARIATION_PARAMS - {"""height""", """width""", """guidance_scale"""}
__lowerCamelCase = IMAGE_VARIATION_BATCH_PARAMS
__lowerCamelCase = [
"""generator""",
"""return_dict""",
"""decoder_num_inference_steps""",
"""super_res_num_inference_steps""",
]
__lowerCamelCase = False
@property
def __a ( self ) -> Dict:
'''simple docstring'''
return 32
@property
def __a ( self ) -> Optional[int]:
'''simple docstring'''
return 32
@property
def __a ( self ) -> Dict:
'''simple docstring'''
return self.time_input_dim
@property
def __a ( self ) -> List[str]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def __a ( self ) -> Tuple:
'''simple docstring'''
return 100
@property
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case__ : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__UpperCamelCase )
@property
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case__ : Tuple = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(__UpperCamelCase )
@property
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case__ : Any = {
'clip_embeddings_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'cross_attention_dim': self.cross_attention_dim,
}
snake_case__ : Tuple = UnCLIPTextProjModel(**__UpperCamelCase )
return model
@property
def __a ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
snake_case__ : List[Any] = {
'sample_size': 32,
# RGB in channels
'in_channels': 3,
# Out channels is double in channels because predicts mean and variance
'out_channels': 6,
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': 'identity',
}
snake_case__ : Optional[Any] = UNet2DConditionModel(**__UpperCamelCase )
return model
@property
def __a ( self ) -> str:
'''simple docstring'''
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def __a ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
snake_case__ : Tuple = UNet2DModel(**self.dummy_super_res_kwargs )
return model
@property
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(1 )
snake_case__ : str = UNet2DModel(**self.dummy_super_res_kwargs )
return model
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : List[str] = self.dummy_decoder
snake_case__ : Tuple = self.dummy_text_proj
snake_case__ : List[str] = self.dummy_text_encoder
snake_case__ : str = self.dummy_tokenizer
snake_case__ : Optional[int] = self.dummy_super_res_first
snake_case__ : List[Any] = self.dummy_super_res_last
snake_case__ : Optional[Any] = UnCLIPScheduler(
variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=1000 , )
snake_case__ : Dict = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=1000 , )
snake_case__ : Any = CLIPImageProcessor(crop_size=32 , size=32 )
snake_case__ : int = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def __a ( self , __UpperCamelCase , __UpperCamelCase=0 , __UpperCamelCase=True ) -> List[Any]:
'''simple docstring'''
snake_case__ : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
if str(__UpperCamelCase ).startswith('mps' ):
snake_case__ : Any = torch.manual_seed(__UpperCamelCase )
else:
snake_case__ : str = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
if pil_image:
snake_case__ : int = input_image * 0.5 + 0.5
snake_case__ : Optional[Any] = input_image.clamp(0 , 1 )
snake_case__ : Any = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
snake_case__ : List[str] = DiffusionPipeline.numpy_to_pil(__UpperCamelCase )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : List[str] = 'cpu'
snake_case__ : List[Any] = self.get_dummy_components()
snake_case__ : List[str] = self.pipeline_class(**__UpperCamelCase )
snake_case__ : Optional[int] = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : int = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
snake_case__ : List[str] = pipe(**__UpperCamelCase )
snake_case__ : str = output.images
snake_case__ : Optional[int] = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
snake_case__ : str = pipe(
**__UpperCamelCase , return_dict=__UpperCamelCase , )[0]
snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1]
snake_case__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case__ : int = np.array(
[
0.9_9_9_7,
0.0_0_0_2,
0.9_9_9_7,
0.9_9_9_7,
0.9_9_6_9,
0.0_0_2_3,
0.9_9_9_7,
0.9_9_6_9,
0.9_9_7_0,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Dict = 'cpu'
snake_case__ : Dict = self.get_dummy_components()
snake_case__ : Optional[Any] = self.pipeline_class(**__UpperCamelCase )
snake_case__ : Optional[Any] = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : Dict = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
snake_case__ : Union[str, Any] = pipe(**__UpperCamelCase )
snake_case__ : Dict = output.images
snake_case__ : int = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
snake_case__ : Any = pipe(
**__UpperCamelCase , return_dict=__UpperCamelCase , )[0]
snake_case__ : Optional[int] = image[0, -3:, -3:, -1]
snake_case__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case__ : List[str] = np.array([0.9_9_9_7, 0.0_0_0_3, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_7_0, 0.0_0_2_4, 0.9_9_9_7, 0.9_9_7_1, 0.9_9_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : Optional[int] = 'cpu'
snake_case__ : Union[str, Any] = self.get_dummy_components()
snake_case__ : str = self.pipeline_class(**__UpperCamelCase )
snake_case__ : Union[str, Any] = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : List[str] = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
snake_case__ : List[Any] = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
snake_case__ : Optional[Any] = pipe(**__UpperCamelCase )
snake_case__ : Dict = output.images
snake_case__ : List[Any] = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
snake_case__ : Any = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
snake_case__ : Dict = pipe(
**__UpperCamelCase , return_dict=__UpperCamelCase , )[0]
snake_case__ : List[Any] = image[0, -3:, -3:, -1]
snake_case__ : List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
snake_case__ : List[Any] = np.array(
[
0.9_9_9_7,
0.9_9_8_9,
0.0_0_0_8,
0.0_0_2_1,
0.9_9_6_0,
0.0_0_1_8,
0.0_0_1_4,
0.0_0_0_2,
0.9_9_3_3,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __a ( self ) -> str:
'''simple docstring'''
snake_case__ : str = torch.device('cpu' )
class __snake_case :
__lowerCamelCase = 1
snake_case__ : List[Any] = self.get_dummy_components()
snake_case__ : Dict = self.pipeline_class(**__UpperCamelCase )
snake_case__ : List[str] = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : int = torch.Generator(device=__UpperCamelCase ).manual_seed(0 )
snake_case__ : Optional[Any] = pipe.decoder.dtype
snake_case__ : Tuple = 1
snake_case__ : List[Any] = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
snake_case__ : Optional[Any] = pipe.prepare_latents(
__UpperCamelCase , dtype=__UpperCamelCase , device=__UpperCamelCase , generator=__UpperCamelCase , latents=__UpperCamelCase , scheduler=DummyScheduler() )
snake_case__ : List[str] = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
snake_case__ : Any = pipe.prepare_latents(
__UpperCamelCase , dtype=__UpperCamelCase , device=__UpperCamelCase , generator=__UpperCamelCase , latents=__UpperCamelCase , scheduler=DummyScheduler() )
snake_case__ : Optional[Any] = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
snake_case__ : int = pipe(
**__UpperCamelCase , decoder_latents=__UpperCamelCase , super_res_latents=__UpperCamelCase ).images
snake_case__ : Optional[int] = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
# Don't pass image, instead pass embedding
snake_case__ : Union[str, Any] = pipeline_inputs.pop('image' )
snake_case__ : Optional[Any] = pipe.image_encoder(__UpperCamelCase ).image_embeds
snake_case__ : str = pipe(
**__UpperCamelCase , decoder_latents=__UpperCamelCase , super_res_latents=__UpperCamelCase , image_embeddings=__UpperCamelCase , ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1E-4
@skip_mps
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : List[str] = torch_device == 'cpu'
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
snake_case__ : Any = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=__UpperCamelCase , expected_max_diff=__UpperCamelCase )
@skip_mps
def __a ( self ) -> List[str]:
'''simple docstring'''
snake_case__ : List[str] = torch_device == 'cpu'
snake_case__ : Any = True
snake_case__ : int = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=__UpperCamelCase , relax_max_difference=__UpperCamelCase , additional_params_copy_to_batched_inputs=__UpperCamelCase , )
def __a ( self ) -> int:
'''simple docstring'''
snake_case__ : Any = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
snake_case__ : Dict = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=__UpperCamelCase , additional_params_copy_to_batched_inputs=__UpperCamelCase , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=__UpperCamelCase )
@skip_mps
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def __a ( self ) -> Dict:
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def __a ( self ) -> int:
'''simple docstring'''
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
def __a ( self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_unclip_image_variation_karlo(self ):
'''simple docstring'''
input_image = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' )
expected_image = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/unclip/karlo_v1_alpha_cat_variation_fp16.npy' )
pipeline = UnCLIPImageVariationPipeline.from_pretrained(
'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.float16 )
pipeline = pipeline.to(torch_device )
pipeline.set_progress_bar_config(disable=None )
generator = torch.Generator(device='cpu' ).manual_seed(0 )
output = pipeline(
input_image , generator=generator , output_type='np' , )
image = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(image , expected_image , 15 )
| 143 | import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
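# SORTED_HANDS is ordered from weakest to strongest hand; generate_random_hand() below derives the expected result from this index order.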
TEST_COMPARE = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
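# Each triple is (player_hand, opponent_hand, expected_result) for PokerHand.compare_with.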
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
TEST_TYPES = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def generate_random_hand():
play, oppo = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
expected = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def generate_random_hands(number_of_hands = 100 ):
return (generate_random_hand() for _ in range(number_of_hands ))
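# The (play >= oppo) + (play > oppo) trick above indexes 0 for Loss, 1 for Tie, 2 for Win; pytest consumes the generator below to fuzz compare_with with 100 random pairs.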
@pytest.mark.parametrize('hand, expected' , TEST_FLUSH )
def test_hand_is_flush( hand , expected ):
assert PokerHand(hand )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , TEST_STRAIGHT )
def test_hand_is_straight( hand , expected ):
assert PokerHand(hand )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , TEST_FIVE_HIGH_STRAIGHT )
def test_hand_is_five_high_straight( hand , expected , card_values ):
player = PokerHand(hand )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , TEST_KIND )
def test_hand_is_same_kind( hand , expected ):
assert PokerHand(hand )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , TEST_TYPES )
def test_hand_values( hand , expected ):
assert PokerHand(hand )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , TEST_COMPARE )
def test_compare_simple( hand , other , expected ):
assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def test_compare_random( hand , other , expected ):
assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
def test_hand_sorted():
poker_hands = [PokerHand(hand ) for hand in SORTED_HANDS]
list_copy = poker_hands.copy()
shuffle(list_copy )
user_sorted = chain(sorted(list_copy ) )
for index, hand in enumerate(user_sorted ):
assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
# Test that five high straights are compared correctly.
pokerhands = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
pokerhands.sort(reverse=True )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
# Multiple calls to five_high_straight function should still return True
# and shouldn't mutate the list in every call other than the first.
pokerhand = PokerHand('2C 4S AS 3D 5C' )
expected = True
expected_card_values = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def test_euler_project():
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
answer = 0
script_dir = os.path.abspath(os.path.dirname(__file__ ) )
poker_hands = os.path.join(script_dir , 'poker_hands.txt' )
with open(poker_hands ) as file_hand:
for line in file_hand:
player_hand = line[:14].strip()
opponent_hand = line[15:].strip()
player, opponent = PokerHand(player_hand ), PokerHand(opponent_hand )
output = player.compare_with(opponent )
if output == "Win":
answer += 1
assert answer == 376
| 143 | 1 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
cache_dir: Optional[Union[str, Path]] = None
force_download: bool = False
resume_download: bool = False
local_files_only: bool = False
proxies: Optional[Dict] = None
user_agent: Optional[str] = None
extract_compressed_file: bool = False
force_extract: bool = False
delete_extracted: bool = False
use_etag: bool = True
num_proc: Optional[int] = None
max_retries: int = 1
use_auth_token: Optional[Union[str, bool]] = None
ignore_url_params: bool = False
download_desc: Optional[str] = None
storage_options: Optional[Dict] = None
def copy(self ) -> "DownloadConfig":
"""simple docstring"""
return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
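# copy() deep-copies every field, so callers can tweak a per-download config without mutating the shared instance.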
| 50 | import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester( unittest.TestCase ):
def __init__( self , parent , batch_size=2 , seq_length=56 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=2 , intermediate_size=7 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , attention_type="block_sparse" , use_bias=True , rescale_embeddings=False , block_size=2 , num_random_blocks=3 , ):
"""simple docstring"""
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_choices = num_choices
self.rescale_embeddings = rescale_embeddings
self.attention_type = attention_type
self.use_bias = use_bias
self.block_size = block_size
self.num_random_blocks = num_random_blocks
def prepare_config_and_inputs(self ):
"""simple docstring"""
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
attention_mask = None
if self.use_attention_mask:
attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
config = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def prepare_config_and_inputs_for_common(self ):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, token_type_ids, attention_mask = config_and_inputs
inputs_dict = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'attention_mask': attention_mask,
}
return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest( FlaxModelTesterMixin , unittest.TestCase ):
all_model_classes = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
test_attn_probs = False
test_mismatched_shapes = False
def setUp(self ):
"""simple docstring"""
self.model_tester = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def test_from_pretrained_save_pretrained(self ):
"""simple docstring"""
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def test_from_pretrained_with_no_automatic_init(self ):
"""simple docstring"""
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def test_no_automatic_init(self ):
"""simple docstring"""
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def test_hidden_states_output(self ):
"""simple docstring"""
super().test_hidden_states_output()
@slow
def test_model_from_pretrained(self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
model = model_class_name.from_pretrained('google/bigbird-roberta-base' )
self.assertIsNotNone(model )
def test_attention_outputs(self ):
"""simple docstring"""
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def test_jit_compilation(self ):
"""simple docstring"""
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
model = model_class(config )
@jax.jit
def model_jitted(input_ids , attention_mask=None , **kwargs ):
return model(input_ids=input_ids , attention_mask=attention_mask , **kwargs )
with self.subTest('JIT Enabled' ):
jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
self.assertEqual(len(outputs ) , len(jitted_outputs ) )
for jitted_output, output in zip(jitted_outputs , outputs ):
self.assertEqual(jitted_output.shape , output.shape )
self.assertEqual(jitted_output.shape , output.shape )
def check_pt_flax_outputs(self , fx_outputs , pt_outputs , model_class , tol=1e-5 , name="outputs" , attributes=None ):
"""simple docstring"""
if name.startswith('outputs.attentions' ):
return
else:
super().check_pt_flax_outputs(fx_outputs , pt_outputs , model_class , tol , name , attributes )
| 50 | 1 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest( SchedulerCommonTest ):
'''simple docstring'''
scheduler_classes = (DEISMultistepScheduler,)
forward_default_kwargs = (("num_inference_steps", 25),)
def get_scheduler_config(self ,**kwargs ):
config = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
}
config.update(**kwargs )
return config
def check_over_configs(self ,time_step=0 ,**config ):
kwargs = dict(self.forward_default_kwargs )
num_inference_steps = kwargs.pop('num_inference_steps' ,None )
sample = self.dummy_sample
residual = 0.1 * sample
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config(**config )
scheduler = scheduler_class(**scheduler_config )
scheduler.set_timesteps(num_inference_steps )
# copy over dummy past residuals
scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname )
new_scheduler = scheduler_class.from_pretrained(tmpdirname )
new_scheduler.set_timesteps(num_inference_steps )
# copy over dummy past residuals
new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
output, new_output = sample, sample
for t in range(time_step ,time_step + scheduler.config.solver_order + 1 ):
output = scheduler.step(output ,t ,sample ,**kwargs ).prev_sample
new_output = new_scheduler.step(new_output ,t ,sample ,**kwargs ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
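# DEIS is a multistep solver: without the same residual history, a freshly loaded scheduler could not reproduce the next steps.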
def test_from_save_pretrained(self ):
pass
def check_over_forward(self ,time_step=0 ,**forward_kwargs ):
kwargs = dict(self.forward_default_kwargs )
num_inference_steps = kwargs.pop('num_inference_steps' ,None )
sample = self.dummy_sample
residual = 0.1 * sample
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
scheduler.set_timesteps(num_inference_steps )
# copy over dummy past residuals (must be after setting timesteps)
scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname )
new_scheduler = scheduler_class.from_pretrained(tmpdirname )
# copy over dummy past residuals
new_scheduler.set_timesteps(num_inference_steps )
# copy over dummy past residual (must be after setting timesteps)
new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
output = scheduler.step(sample ,time_step ,sample ,**kwargs ).prev_sample
new_output = new_scheduler.step(sample ,time_step ,sample ,**kwargs ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def full_loop(self ,scheduler=None ,**config ):
if scheduler is None:
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config )
scheduler = scheduler_class(**scheduler_config )
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config )
scheduler = scheduler_class(**scheduler_config )
num_inference_steps = 10
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps )
for i, t in enumerate(scheduler.timesteps ):
residual = model(sample ,t )
sample = scheduler.step(residual ,t ,sample ).prev_sample
return sample
def test_step_shape(self ):
kwargs = dict(self.forward_default_kwargs )
num_inference_steps = kwargs.pop('num_inference_steps' ,None )
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
sample = self.dummy_sample
residual = 0.1 * sample
if num_inference_steps is not None and hasattr(scheduler ,'set_timesteps' ):
scheduler.set_timesteps(num_inference_steps )
elif num_inference_steps is not None and not hasattr(scheduler ,'set_timesteps' ):
scheduler.num_inference_steps = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
time_step_0 = scheduler.timesteps[5]
time_step_1 = scheduler.timesteps[6]
output_0 = scheduler.step(residual ,time_step_0 ,sample ,**kwargs ).prev_sample
output_1 = scheduler.step(residual ,time_step_1 ,sample ,**kwargs ).prev_sample
self.assertEqual(output_0.shape ,sample.shape )
self.assertEqual(output_0.shape ,output_1.shape )
def test_switch(self ):
scheduler = DEISMultistepScheduler(**self.get_scheduler_config() )
sample = self.full_loop(scheduler=scheduler )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.23916 ) < 1e-3
scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config )
scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config )
scheduler = UniPCMultistepScheduler.from_config(scheduler.config )
scheduler = DEISMultistepScheduler.from_config(scheduler.config )
sample = self.full_loop(scheduler=scheduler )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.23916 ) < 1e-3
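# Round-tripping the config through the other multistep solvers must leave DEIS results numerically unchanged.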
def test_timesteps(self ):
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=timesteps )
def test_thresholding(self ):
self.check_over_configs(thresholding=False )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=True ,prediction_type=prediction_type ,sample_max_value=threshold ,algorithm_type='deis' ,solver_order=order ,solver_type=solver_type ,)
def test_prediction_type(self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type )
def test_solver_order_and_type(self ):
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=order ,solver_type=solver_type ,prediction_type=prediction_type ,algorithm_type=algorithm_type ,)
sample = self.full_loop(
solver_order=order ,solver_type=solver_type ,prediction_type=prediction_type ,algorithm_type=algorithm_type ,)
assert not torch.isnan(sample ).any(), "Samples have nan numbers"
def test_lower_order_final(self ):
self.check_over_configs(lower_order_final=True )
self.check_over_configs(lower_order_final=False )
def test_inference_steps(self ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=num_inference_steps ,time_step=0 )
def test_full_loop_no_noise(self ):
sample = self.full_loop()
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.23916 ) < 1e-3
def test_full_loop_with_v_prediction(self ):
sample = self.full_loop(prediction_type='v_prediction' )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.091 ) < 1e-3
def test_fp16_support(self ):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(thresholding=True ,dynamic_thresholding_ratio=0 )
scheduler = scheduler_class(**scheduler_config )
num_inference_steps = 10
model = self.dummy_model()
sample = self.dummy_sample_deter.half()
scheduler.set_timesteps(num_inference_steps )
for i, t in enumerate(scheduler.timesteps ):
residual = model(sample ,t )
sample = scheduler.step(residual ,t ,sample ).prev_sample
assert sample.dtype == torch.float16 | 74 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig( PretrainedConfig ):
'''simple docstring'''
__A : str = "decision_transformer"
__A : Union[str, Any] = ["past_key_values"]
__A : Optional[int] = {
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
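# attribute_map lets generic code read the GPT-2 style fields (n_positions, n_head, n_layer) through the standard config names.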
def __init__( self , state_dim=17 , act_dim=4 , hidden_size=128 , max_ep_len=4096 , action_tanh=True , vocab_size=1 , n_positions=1024 , n_layer=3 , n_head=1 , n_inner=None , activation_function="relu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=5_0256 , eos_token_id=5_0256 , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , **kwargs , ):
"""simple docstring"""
self.state_dim = state_dim
self.act_dim = act_dim
self.hidden_size = hidden_size
self.max_ep_len = max_ep_len
self.action_tanh = action_tanh
self.vocab_size = vocab_size
self.n_positions = n_positions
self.n_layer = n_layer
self.n_head = n_head
self.n_inner = n_inner
self.activation_function = activation_function
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.scale_attn_weights = scale_attn_weights
self.use_cache = use_cache
self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
self.reorder_and_upcast_attn = reorder_and_upcast_attn
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
| 283 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def minimax( depth : int , node_index : int , is_max : bool , scores : list[int] , height : float ) -> int:
"""simple docstring"""
if depth < 0:
raise ValueError("""Depth cannot be less than 0""" )
if not scores:
raise ValueError("""Scores cannot be empty""" )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
if is_max
else min(
minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
)
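# Levels alternate between the maximizing and minimizing player, so is_max is flipped on every recursive call.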
def main():
"""simple docstring"""
scores = [90, 23, 6, 33, 21, 65, 123, 34423]
height = math.log(len(scores ) , 2 )
print(f'Optimal value : {minimax(0 , 0 , True , scores , height )}' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 16 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class LDMTextToImagePipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
pipeline_class = LDMTextToImagePipeline
params = TEXT_TO_IMAGE_PARAMS - {
"negative_prompt",
"negative_prompt_embeds",
"cross_attention_kwargs",
"prompt_embeds",
}
required_optional_params = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"callback",
"callback_steps",
}
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
test_cpu_offload = False
def get_dummy_components(self ):
'''simple docstring'''
torch.manual_seed(0 )
unet = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
scheduler = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
torch.manual_seed(0 )
vae = AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , latent_channels=4 , )
torch.manual_seed(0 )
text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
text_encoder = CLIPTextModel(text_encoder_config )
tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vqvae""": vae,
"""bert""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def get_dummy_inputs(self , device , seed=0 ):
'''simple docstring'''
if str(device ).startswith("""mps""" ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def test_inference_text2img(self ):
'''simple docstring'''
device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
pipe = LDMTextToImagePipeline(**components )
pipe.to(device )
pipe.set_progress_bar_config(disable=None )
inputs = self.get_dummy_inputs(device )
image = pipe(**inputs ).images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
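# Comparing a 3x3 corner slice keeps the regression check cheap while still catching numerical drift.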
@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests( unittest.TestCase ):
def tearDown(self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs(self , device , dtype=torch.float32 , seed=0 ):
'''simple docstring'''
generator = torch.manual_seed(seed )
latents = np.random.RandomState(seed ).standard_normal((1, 4, 32, 32) )
latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def test_ldm_default_ddim(self ):
'''simple docstring'''
pipe = LDMTextToImagePipeline.from_pretrained("""CompVis/ldm-text2im-large-256""" ).to(torch_device )
pipe.set_progress_bar_config(disable=None )
inputs = self.get_inputs(torch_device )
image = pipe(**inputs ).images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878] )
max_diff = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1E-3
@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests( unittest.TestCase ):
def tearDown(self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs(self , device , dtype=torch.float32 , seed=0 ):
'''simple docstring'''
generator = torch.manual_seed(seed )
latents = np.random.RandomState(seed ).standard_normal((1, 4, 32, 32) )
latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 50,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def test_ldm_default_ddim(self ):
'''simple docstring'''
pipe = LDMTextToImagePipeline.from_pretrained("""CompVis/ldm-text2im-large-256""" ).to(torch_device )
pipe.set_progress_bar_config(disable=None )
inputs = self.get_inputs(torch_device )
image = pipe(**inputs ).images[0]
expected_image = load_numpy(
"""https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy""" )
max_diff = np.abs(expected_image - image ).max()
assert max_diff < 1E-3
| 16 | 1 |
"""simple docstring"""
def solution(n = 100 ):
"""simple docstring"""
collect_powers = set()
current_pow = 0
n = n + 1  # maximum limit
for a in range(2 , n ):
for b in range(2 , n ):
current_pow = a**b  # calculates the current power
collect_powers.add(current_pow )  # adds the result to the set
return len(collect_powers )
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip())))
| 153 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester( unittest.TestCase ):
def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
"""simple docstring"""
size = size if size is not None else {'''shortest_edge''': 18}
crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
def prepare_image_processor_dict(self ):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class LevitImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
image_processing_class = LevitImageProcessor if is_vision_available() else None
def setUp(self ):
"""simple docstring"""
self.image_processor_tester = LevitImageProcessingTester(self )
@property
def image_processor_dict(self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self ):
"""simple docstring"""
image_processing = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(image_processing , '''image_mean''' ) )
self.assertTrue(hasattr(image_processing , '''image_std''' ) )
self.assertTrue(hasattr(image_processing , '''do_normalize''' ) )
self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
self.assertTrue(hasattr(image_processing , '''do_center_crop''' ) )
self.assertTrue(hasattr(image_processing , '''size''' ) )
def test_image_processor_from_dict_with_kwargs(self ):
"""simple docstring"""
image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def test_batch_feature(self ):
"""simple docstring"""
pass
def test_call_pil(self ):
"""simple docstring"""
image_processing = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
for image in image_inputs:
self.assertIsInstance(image , Image.Image )
# Test not batched input
encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def test_call_numpy(self ):
"""simple docstring"""
image_processing = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
for image in image_inputs:
self.assertIsInstance(image , np.ndarray )
# Test not batched input
encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def test_call_pytorch(self ):
"""simple docstring"""
image_processing = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
for image in image_inputs:
self.assertIsInstance(image , torch.Tensor )
# Test not batched input
encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 110 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester( unittest.TestCase ):
def __init__( self , parent , batch_size=7 , num_channels=3 , num_frames=10 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , crop_size=None , ):
size = size if size is not None else {'shortest_edge': 18}
crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.num_frames = num_frames
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.crop_size = crop_size
def prepare_image_processor_dict(self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class VivitImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
image_processing_class = VivitImageProcessor if is_vision_available() else None
def setUp(self ):
self.image_processor_tester = VivitImageProcessingTester(self )
@property
def image_processor_dict(self ):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self ):
image_processing = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(image_processing , '''image_mean''' ) )
self.assertTrue(hasattr(image_processing , '''image_std''' ) )
self.assertTrue(hasattr(image_processing , '''do_normalize''' ) )
self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
self.assertTrue(hasattr(image_processing , '''do_center_crop''' ) )
self.assertTrue(hasattr(image_processing , '''size''' ) )
def test_image_processor_from_dict_with_kwargs(self ):
image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def test_call_pil(self ):
image_processing = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
video_inputs = prepare_video_inputs(self.image_processor_tester , equal_resolution=False )
for video in video_inputs:
self.assertIsInstance(video , list )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
encoded_videos = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
encoded_videos = image_processing(video_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def test_call_numpy(self ):
image_processing = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
video_inputs = prepare_video_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
for video in video_inputs:
self.assertIsInstance(video , list )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
encoded_videos = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
encoded_videos = image_processing(video_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def test_call_pytorch(self ):
image_processing = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
video_inputs = prepare_video_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
for video in video_inputs:
self.assertIsInstance(video , list )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
encoded_videos = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
encoded_videos = image_processing(video_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 367 |
'''simple docstring'''
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
RADIUS = 637_8137
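# WGS-84 axis lengths and mean radius are given in meters, so the returned distance is in meters as well.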
def haversine_distance( lat1 , lon1 , lat2 , lon2 ) -> float:
'''simple docstring'''
flattening = (AXIS_A - AXIS_B) / AXIS_A
phi_1 = atan((1 - flattening) * tan(radians(lat1 ) ) )
phi_2 = atan((1 - flattening) * tan(radians(lat2 ) ) )
lambda_1 = radians(lon1 )
lambda_2 = radians(lon2 )
# Equation
sin_sq_phi = sin((phi_2 - phi_1) / 2 )
sin_sq_lambda = sin((lambda_2 - lambda_1) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
h_value = sqrt(sin_sq_phi + (cos(phi_2 ) * cos(phi_1 ) * sin_sq_lambda) )
return 2 * RADIUS * asin(h_value )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 72 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests( unittest.TestCase ):
@property
def gpu_provider(self ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
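# Caps the ONNX Runtime CUDA arena at 15 GB and grows it only on demand instead of grabbing the whole GPU.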
@property
def gpu_options(self ):
"""simple docstring"""
options = ort.SessionOptions()
options.enable_mem_pattern = False
return options
def test_inference_default_pndm(self ):
"""simple docstring"""
init_image = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""")
mask_image = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""")
expected_image = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""")
# using the PNDM scheduler by default
pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=None)
prompt = """A red cat sitting on a park bench"""
generator = np.random.RandomState(0)
output = pipe(
prompt=prompt , image=init_image , mask_image=mask_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=generator , output_type="""np""" , )
image = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 1E-2
| 136 | import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp ):
'''simple docstring'''
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X2_0000 and cp <= 0X2_A6DF) #
or (cp >= 0X2_A700 and cp <= 0X2_B73F) #
or (cp >= 0X2_B740 and cp <= 0X2_B81F) #
or (cp >= 0X2_B820 and cp <= 0X2_CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2_F800 and cp <= 0X2_FA1F) #
): #
return True
return False
def is_chinese(word ):
'''simple docstring'''
# word like '180' or '身高' or '神'
for char in word:
cp = ord(char )
if not _is_chinese_char(cp ):
return 0
return 1
def get_chinese_word(tokens ):
'''simple docstring'''
word_set = set()
for token in tokens:
chinese_word = len(token ) > 1 and is_chinese(token )
if chinese_word:
word_set.add(token )
word_list = list(word_set )
return word_list
def add_sub_symbol(bert_tokens , chinese_word_set ):
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
max_word_len = max([len(w ) for w in chinese_word_set] )
bert_word = bert_tokens
start, end = 0, len(bert_word )
while start < end:
single_word = True
if is_chinese(bert_word[start] ):
l = min(end - start , max_word_len )
for i in range(l , 1 , -1 ):
whole_word = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
bert_word[j] = """##""" + bert_word[j]
start = start + i
single_word = False
break
if single_word:
start += 1
return bert_word
def prepare_ref(lines , ltp_tokenizer , bert_tokenizer ):
'''simple docstring'''
ltp_res = []
for i in range(0 , len(lines ) , 100 ):
res = ltp_tokenizer.seg(lines[i : i + 100] )[0]
res = [get_chinese_word(r ) for r in res]
ltp_res.extend(res )
assert len(ltp_res ) == len(lines )
bert_res = []
for i in range(0 , len(lines ) , 100 ):
res = bert_tokenizer(lines[i : i + 100] , add_special_tokens=True , truncation=True , max_length=512 )
bert_res.extend(res["""input_ids"""] )
assert len(bert_res ) == len(lines )
ref_ids = []
for input_ids, chinese_word in zip(bert_res , ltp_res ):
input_tokens = []
for id in input_ids:
token = bert_tokenizer._convert_id_to_token(id )
input_tokens.append(token )
input_tokens = add_sub_symbol(input_tokens , chinese_word )
ref_id = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(input_tokens ):
if token[:2] == "##":
clean_token = token[2:]
# save chinese tokens' pos
if len(clean_token ) == 1 and _is_chinese_char(ord(clean_token ) ):
ref_id.append(i )
ref_ids.append(ref_id )
assert len(ref_ids ) == len(bert_res )
return ref_ids
def main(args ):
'''simple docstring'''
# For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , """r""" , encoding="""utf-8""" ) as f:
data = f.readlines()
data = [line.strip() for line in data if len(line ) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
ltp_tokenizer = LTP(args.ltp )  # faster in GPU device
bert_tokenizer = BertTokenizer.from_pretrained(args.bert )
ref_ids = prepare_ref(data , ltp_tokenizer , bert_tokenizer )
with open(args.save_path , """w""" , encoding="""utf-8""" ) as f:
data = [json.dumps(ref ) + """\n""" for ref in ref_ids]
f.writelines(data )
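# Each output line is a JSON list with the positions of sub-word pieces that fall inside a Chinese whole word.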
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
args = parser.parse_args()
main(args)
| 343 | 0 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class _UpperCAmelCase :
def __init__( self,parent,batch_size=13,seq_length=7,is_training=True,use_input_mask=True,use_token_type_ids=False,use_labels=True,vocab_size=99,hidden_size=32,num_hidden_layers=5,num_attention_heads=4,intermediate_size=37,hidden_act="gelu",hidden_dropout_prob=0.1,attention_probs_dropout_prob=0.1,max_position_embeddings=512,type_vocab_size=16,type_sequence_label_size=2,initializer_range=0.02,num_labels=3,num_choices=4,scope=None,):
'''simple docstring'''
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs( self ):
'''simple docstring'''
input_ids = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size],self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
choice_labels = ids_tensor([self.batch_size],self.num_choices )
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config( self ):
'''simple docstring'''
return OpenLlamaConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=False,initializer_range=self.initializer_range,use_stable_embedding=True,)
def create_and_check_model( self,config,input_ids,token_type_ids,input_mask,sequence_labels,token_labels,choice_labels ):
'''simple docstring'''
model = OpenLlamaModel(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids,attention_mask=input_mask )
result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_model_as_decoder( self,config,input_ids,token_type_ids,input_mask,sequence_labels,token_labels,choice_labels,encoder_hidden_states,encoder_attention_mask,):
'''simple docstring'''
config.add_cross_attention = True
model = OpenLlamaModel(config )
model.to(torch_device )
model.eval()
result = model(
input_ids,attention_mask=input_mask,encoder_hidden_states=encoder_hidden_states,encoder_attention_mask=encoder_attention_mask,)
result = model(
input_ids,attention_mask=input_mask,encoder_hidden_states=encoder_hidden_states,)
result = model(input_ids,attention_mask=input_mask )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_for_causal_lm( self,config,input_ids,token_type_ids,input_mask,sequence_labels,token_labels,choice_labels,encoder_hidden_states,encoder_attention_mask,):
'''simple docstring'''
model = OpenLlamaForCausalLM(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids,attention_mask=input_mask,labels=token_labels )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def create_and_check_decoder_model_past_large_inputs( self,config,input_ids,token_type_ids,input_mask,sequence_labels,token_labels,choice_labels,encoder_hidden_states,encoder_attention_mask,):
'''simple docstring'''
config.is_decoder = True
config.add_cross_attention = True
model = OpenLlamaForCausalLM(config=config )
model.to(torch_device )
model.eval()
# first forward pass
outputs = model(
input_ids,attention_mask=input_mask,encoder_hidden_states=encoder_hidden_states,encoder_attention_mask=encoder_attention_mask,use_cache=True,)
past_key_values = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3),config.vocab_size )
next_mask = ids_tensor((self.batch_size, 3),vocab_size=2 )
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens],dim=-1 )
next_attention_mask = torch.cat([input_mask, next_mask],dim=-1 )
output_from_no_past = model(
next_input_ids,attention_mask=next_attention_mask,encoder_hidden_states=encoder_hidden_states,encoder_attention_mask=encoder_attention_mask,output_hidden_states=True,)["""hidden_states"""][0]
output_from_past = model(
next_tokens,attention_mask=next_attention_mask,encoder_hidden_states=encoder_hidden_states,encoder_attention_mask=encoder_attention_mask,past_key_values=past_key_values,output_hidden_states=True,)["""hidden_states"""][0]
# select random slice
random_slice_idx = ids_tensor((1,),output_from_past.shape[-1] ).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice,output_from_no_past_slice,atol=1e-3 ) )
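# The method above is the standard KV-cache consistency check: one full-sequence
# forward pass must match a cached-prefix pass followed by the new tokens. A
# standalone sketch of the same idea (names are illustrative, not part of this
# tester; attention masks are omitted for brevity):
#
#     full = model(torch.cat([prefix, new], dim=-1), output_hidden_states=True)["hidden_states"][0]
#     cache = model(prefix, use_cache=True).past_key_values
#     inc = model(new, past_key_values=cache, output_hidden_states=True)["hidden_states"][0]
#     assert torch.allclose(full[:, -new.shape[1]:], inc, atol=1e-3)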
def prepare_config_and_inputs_for_common( self ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class OpenLlamaModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
all_model_classes = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
pipeline_model_mapping = (
{
"""feature-extraction""": OpenLlamaModel,
"""text-classification""": OpenLlamaForSequenceClassification,
"""text-generation""": OpenLlamaForCausalLM,
"""zero-shot""": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
test_headmasking = False
test_pruning = False
def setUp( self ):
'''simple docstring'''
self.model_tester = OpenLlamaModelTester(self )
self.config_tester = ConfigTester(self,config_class=OpenLlamaConfig,hidden_size=37 )
def test_config( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def test_model( self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def test_model_various_embeddings( self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_model(*config_and_inputs )
def test_open_llama_sequence_classification_model( self ):
'''simple docstring'''
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
input_ids = input_dict["""input_ids"""]
attention_mask = input_ids.ne(1 ).to(torch_device )
sequence_labels = ids_tensor([self.model_tester.batch_size],self.model_tester.type_sequence_label_size )
model = OpenLlamaForSequenceClassification(config )
model.to(torch_device )
model.eval()
result = model(input_ids,attention_mask=attention_mask,labels=sequence_labels )
self.assertEqual(result.logits.shape,(self.model_tester.batch_size, self.model_tester.num_labels) )
def test_open_llama_sequence_classification_model_for_single_label( self ):
'''simple docstring'''
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
config.problem_type = """single_label_classification"""
input_ids = input_dict["""input_ids"""]
attention_mask = input_ids.ne(1 ).to(torch_device )
sequence_labels = ids_tensor([self.model_tester.batch_size],self.model_tester.type_sequence_label_size )
model = OpenLlamaForSequenceClassification(config )
model.to(torch_device )
model.eval()
result = model(input_ids,attention_mask=attention_mask,labels=sequence_labels )
self.assertEqual(result.logits.shape,(self.model_tester.batch_size, self.model_tester.num_labels) )
def test_open_llama_sequence_classification_model_for_multi_label( self ):
'''simple docstring'''
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
config.problem_type = """multi_label_classification"""
input_ids = input_dict["""input_ids"""]
attention_mask = input_ids.ne(1 ).to(torch_device )
sequence_labels = ids_tensor(
[self.model_tester.batch_size, config.num_labels],self.model_tester.type_sequence_label_size ).to(torch.float )
model = OpenLlamaForSequenceClassification(config )
model.to(torch_device )
model.eval()
result = model(input_ids,attention_mask=attention_mask,labels=sequence_labels )
self.assertEqual(result.logits.shape,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def test_save_load_fast_init_from_base( self ):
'''simple docstring'''
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def test_model_rope_scaling( self,scaling_type ):
'''simple docstring'''
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
short_input = ids_tensor([1, 10],config.vocab_size )
long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )],config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
original_model = OpenLlamaModel(config )
original_model.to(torch_device )
original_model.eval()
original_short_output = original_model(short_input ).last_hidden_state
original_long_output = original_model(long_input ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
config.rope_scaling = {"""type""": scaling_type, """factor""": 10.0}
scaled_model = OpenLlamaModel(config )
scaled_model.to(torch_device )
scaled_model.eval()
scaled_short_output = scaled_model(short_input ).last_hidden_state
scaled_long_output = scaled_model(long_input ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(original_short_output,scaled_short_output,atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(original_short_output,scaled_short_output,atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(original_long_output,scaled_long_output,atol=1e-5 ) )
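The parameterized test above covers both RoPE position-scaling strategies. A minimal sketch of opting in on a config (the factor is an arbitrary illustration):

from transformers import OpenLlamaConfig, OpenLlamaModel

config = OpenLlamaConfig()
# "linear" rescales every position index by 1/factor; "dynamic" leaves short
# inputs untouched and only adjusts RoPE beyond max_position_embeddings
config.rope_scaling = {"type": "dynamic", "factor": 4.0}
model = OpenLlamaModel(config)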
| 46 |
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock( nn.Module ):
def __init__( self,dim,num_attention_heads,attention_head_dim,dropout=0.0,cross_attention_dim = None,activation_fn = "geglu",num_embeds_ada_norm = None,attention_bias = False,only_cross_attention = False,double_self_attention = False,upcast_attention = False,norm_elementwise_affine = True,norm_type = "layer_norm",final_dropout = False,):
'''simple docstring'''
super().__init__()
self.only_cross_attention = only_cross_attention
self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == """ada_norm_zero"""
self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == """ada_norm"""
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'
f' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
self.norm1 = AdaLayerNorm(dim,num_embeds_ada_norm )
elif self.use_ada_layer_norm_zero:
self.norm1 = AdaLayerNormZero(dim,num_embeds_ada_norm )
else:
self.norm1 = nn.LayerNorm(dim,elementwise_affine=norm_elementwise_affine )
self.attn1 = Attention(
query_dim=dim,heads=num_attention_heads,dim_head=attention_head_dim,dropout=dropout,bias=attention_bias,cross_attention_dim=cross_attention_dim if only_cross_attention else None,upcast_attention=upcast_attention,)
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
self.norm2 = (
AdaLayerNorm(dim,num_embeds_ada_norm )
if self.use_ada_layer_norm
else nn.LayerNorm(dim,elementwise_affine=norm_elementwise_affine )
)
self.attn2 = Attention(
query_dim=dim,cross_attention_dim=cross_attention_dim if not double_self_attention else None,heads=num_attention_heads,dim_head=attention_head_dim,dropout=dropout,bias=attention_bias,upcast_attention=upcast_attention,) # is self-attn if encoder_hidden_states is none
else:
self.norm2 = None
self.attn2 = None
# 3. Feed-forward
self.norm3 = nn.LayerNorm(dim,elementwise_affine=norm_elementwise_affine )
self.ff = FeedForward(dim,dropout=dropout,activation_fn=activation_fn,final_dropout=final_dropout )
# let chunk size default to None
self._chunk_size = None
self._chunk_dim = 0
def set_chunk_feed_forward( self,chunk_size,dim ):
'''simple docstring'''
self._chunk_size = chunk_size
self._chunk_dim = dim
def forward( self,hidden_states,attention_mask = None,encoder_hidden_states = None,encoder_attention_mask = None,timestep = None,cross_attention_kwargs = None,class_labels = None,):
'''simple docstring'''
if self.use_ada_layer_norm:
norm_hidden_states = self.norm1(hidden_states,timestep )
elif self.use_ada_layer_norm_zero:
norm_hidden_states , gate_msa , shift_mlp , scale_mlp , gate_mlp = self.norm1(
hidden_states,timestep,class_labels,hidden_dtype=hidden_states.dtype )
else:
norm_hidden_states = self.norm1(hidden_states )
cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
attn_output = self.attn1(
norm_hidden_states,encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,attention_mask=attention_mask,**cross_attention_kwargs,)
if self.use_ada_layer_norm_zero:
attn_output = gate_msa.unsqueeze(1 ) * attn_output
hidden_states = attn_output + hidden_states
# 2. Cross-Attention
if self.attn2 is not None:
norm_hidden_states = (
self.norm2(hidden_states,timestep ) if self.use_ada_layer_norm else self.norm2(hidden_states )
)
attn_output = self.attn2(
norm_hidden_states,encoder_hidden_states=encoder_hidden_states,attention_mask=encoder_attention_mask,**cross_attention_kwargs,)
hidden_states = attn_output + hidden_states
# 3. Feed-forward
norm_hidden_states = self.norm3(hidden_states )
if self.use_ada_layer_norm_zero:
norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.' )
num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
ff_output = torch.cat(
[self.ff(hid_slice ) for hid_slice in norm_hidden_states.chunk(num_chunks,dim=self._chunk_dim )],dim=self._chunk_dim,)
else:
ff_output = self.ff(norm_hidden_states )
if self.use_ada_layer_norm_zero:
ff_output = gate_mlp.unsqueeze(1 ) * ff_output
hidden_states = ff_output + hidden_states
return hidden_states
class FeedForward( nn.Module ):
def __init__( self,dim,dim_out = None,mult = 4,dropout = 0.0,activation_fn = "geglu",final_dropout = False,):
'''simple docstring'''
super().__init__()
inner_dim = int(dim * mult )
dim_out = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
act_fn = GELU(dim,inner_dim )
if activation_fn == "gelu-approximate":
act_fn = GELU(dim,inner_dim,approximate="""tanh""" )
elif activation_fn == "geglu":
act_fn = GEGLU(dim,inner_dim )
elif activation_fn == "geglu-approximate":
act_fn = ApproximateGELU(dim,inner_dim )
self.net = nn.ModuleList([] )
# project in
self.net.append(act_fn )
# project dropout
self.net.append(nn.Dropout(dropout ) )
# project out
self.net.append(nn.Linear(inner_dim,dim_out ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(dropout ) )
def forward( self,hidden_states ):
'''simple docstring'''
for module in self.net:
hidden_states = module(hidden_states )
return hidden_states
class GELU( nn.Module ):
def __init__( self,dim_in,dim_out,approximate = "none" ):
'''simple docstring'''
super().__init__()
self.proj = nn.Linear(dim_in,dim_out )
self.approximate = approximate
def gelu( self,gate ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(gate,approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.float32 ),approximate=self.approximate ).to(dtype=gate.dtype )
def forward( self,hidden_states ):
'''simple docstring'''
hidden_states = self.proj(hidden_states )
hidden_states = self.gelu(hidden_states )
return hidden_states
class GEGLU( nn.Module ):
def __init__( self,dim_in,dim_out ):
'''simple docstring'''
super().__init__()
self.proj = nn.Linear(dim_in,dim_out * 2 )
def gelu( self,gate ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(gate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.float32 ) ).to(dtype=gate.dtype )
def forward( self,hidden_states ):
'''simple docstring'''
hidden_states , gate = self.proj(hidden_states ).chunk(2,dim=-1 )
return hidden_states * self.gelu(gate )
class ApproximateGELU( nn.Module ):
def __init__( self,dim_in,dim_out ):
'''simple docstring'''
super().__init__()
self.proj = nn.Linear(dim_in,dim_out )
def forward( self,x ):
'''simple docstring'''
x = self.proj(x )
return x * torch.sigmoid(1.702 * x )
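# The constant 1.702 above implements the sigmoid approximation
# GELU(x) ~= x * sigmoid(1.702 * x). Illustrative sanity check (not part of
# this module; the 0.03 bound is an assumption — the true max error on this
# range is on the order of 1e-2):
#
#     x = torch.linspace(-3.0, 3.0, 1000)
#     assert (F.gelu(x) - x * torch.sigmoid(1.702 * x)).abs().max() < 0.03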
class AdaLayerNorm( nn.Module ):
def __init__( self,embedding_dim,num_embeddings ):
'''simple docstring'''
super().__init__()
self.emb = nn.Embedding(num_embeddings,embedding_dim )
self.silu = nn.SiLU()
self.linear = nn.Linear(embedding_dim,embedding_dim * 2 )
self.norm = nn.LayerNorm(embedding_dim,elementwise_affine=False )
def forward( self,x,timestep ):
'''simple docstring'''
emb = self.linear(self.silu(self.emb(timestep ) ) )
scale , shift = torch.chunk(emb,2 )
x = self.norm(x ) * (1 + scale) + shift
return x
class AdaLayerNormZero( nn.Module ):
def __init__( self,embedding_dim,num_embeddings ):
'''simple docstring'''
super().__init__()
self.emb = CombinedTimestepLabelEmbeddings(num_embeddings,embedding_dim )
self.silu = nn.SiLU()
self.linear = nn.Linear(embedding_dim,6 * embedding_dim,bias=True )
self.norm = nn.LayerNorm(embedding_dim,elementwise_affine=False,eps=1e-6 )
def forward( self,x,timestep,class_labels,hidden_dtype=None ):
'''simple docstring'''
emb = self.linear(self.silu(self.emb(timestep,class_labels,hidden_dtype=hidden_dtype ) ) )
shift_msa , scale_msa , gate_msa , shift_mlp , scale_mlp , gate_mlp = emb.chunk(6,dim=1 )
x = self.norm(x ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm( nn.Module ):
def __init__( self,embedding_dim,out_dim,num_groups,act_fn = None,eps = 1e-5 ):
'''simple docstring'''
super().__init__()
self.num_groups = num_groups
self.eps = eps
if act_fn is None:
self.act = None
else:
self.act = get_activation(act_fn )
self.linear = nn.Linear(embedding_dim,out_dim * 2 )
def forward( self,x,emb ):
'''simple docstring'''
if self.act:
emb = self.act(emb )
emb = self.linear(emb )
emb = emb[:, :, None, None]
scale , shift = emb.chunk(2,dim=1 )
x = F.group_norm(x,self.num_groups,eps=self.eps )
x = x * (1 + scale) + shift
return x
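`BasicTransformerBlock` exposes feed-forward chunking as a peak-memory/speed trade-off: the normed hidden states are split along one dimension, the FF runs per slice, and the slices are concatenated back. A minimal sketch using the classes above; the sizes are arbitrary illustrations, and the chunked result must match the unchunked pass:

import torch

block = BasicTransformerBlock(dim=320, num_attention_heads=8, attention_head_dim=40)
hidden = torch.randn(2, 64, 320)  # (batch, sequence, dim)
reference = block(hidden)

# chunk along the sequence axis; 64 must be divisible by chunk_size or
# forward() raises the ValueError defined above
block.set_chunk_feed_forward(chunk_size=16, dim=1)
chunked = block(hidden)
assert torch.allclose(reference, chunked, atol=1e-5)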
| 46 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class ShapEPipelineOutput(BaseOutput ):
'''simple docstring'''
images: Union[PIL.Image.Image, np.ndarray]
class ShapEImg2ImgPipeline(DiffusionPipeline ):
'''simple docstring'''
def __init__(self , prior: PriorTransformer , image_encoder: CLIPVisionModel , image_processor: CLIPImageProcessor , scheduler: HeunDiscreteScheduler , renderer: ShapERenderer , ):
'''simple docstring'''
super().__init__()
self.register_modules(
prior=prior , image_encoder=image_encoder , image_processor=image_processor , scheduler=scheduler , renderer=renderer , )
def prepare_latents(self , shape , dtype , device , generator , latents , scheduler ):
'''simple docstring'''
if latents is None:
latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
latents = latents.to(device )
latents = latents * scheduler.init_noise_sigma
return latents
def enable_sequential_cpu_offload(self , gpu_id=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
device = torch.device(f"cuda:{gpu_id}" )
models = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(cpu_offloaded_model , device )
@property
def _execution_device(self ):
'''simple docstring'''
if self.device != torch.device('''meta''' ) or not hasattr(self.image_encoder , '''_hf_hook''' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(module , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def _encode_image(self , image , device , num_images_per_prompt , do_classifier_free_guidance , ):
'''simple docstring'''
if isinstance(image , list ) and isinstance(image[0] , torch.Tensor ):
image = torch.cat(image , axis=0 ) if image[0].ndim == 4 else torch.stack(image , axis=0 )
if not isinstance(image , torch.Tensor ):
image = self.image_processor(image , return_tensors='''pt''' ).pixel_values[0].unsqueeze(0 )
image = image.to(dtype=self.image_encoder.dtype , device=device )
image_embeds = self.image_encoder(image )['''last_hidden_state''']
image_embeds = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
if do_classifier_free_guidance:
negative_image_embeds = torch.zeros_like(image_embeds )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
image_embeds = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
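# With classifier-free guidance enabled, the embeddings returned above are
# stacked [unconditional, conditional] along the batch axis; the denoising
# loop below splits them back with noise_pred.chunk(2). Illustrative check
# (not part of the pipeline):
#
#     embeds = pipe._encode_image(image, device, num_images_per_prompt=1, do_classifier_free_guidance=True)
#     assert embeds.shape[0] == 2 * batch_size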
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING )
def __call__(self , image: Union[PIL.Image.Image, List[PIL.Image.Image]] , num_images_per_prompt: int = 1 , num_inference_steps: int = 25 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , latents: Optional[torch.FloatTensor] = None , guidance_scale: float = 4.0 , frame_size: int = 64 , output_type: Optional[str] = "pil" , return_dict: bool = True , ):
'''simple docstring'''
if isinstance(image , PIL.Image.Image ):
batch_size = 1
elif isinstance(image , torch.Tensor ):
batch_size = image.shape[0]
elif isinstance(image , list ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
batch_size = len(image )
else:
raise ValueError(
f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image )}" )
device = self._execution_device
batch_size = batch_size * num_images_per_prompt
do_classifier_free_guidance = guidance_scale > 1.0
image_embeds = self._encode_image(image , device , num_images_per_prompt , do_classifier_free_guidance )
# prior
self.scheduler.set_timesteps(num_inference_steps , device=device )
timesteps = self.scheduler.timesteps
num_embeddings = self.prior.config.num_embeddings
embedding_dim = self.prior.config.embedding_dim
latents = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , device , generator , latents , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
latents = latents.reshape(latents.shape[0] , num_embeddings , embedding_dim )
for i, t in enumerate(self.progress_bar(timesteps ) ):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
scaled_model_input = self.scheduler.scale_model_input(latent_model_input , t )
noise_pred = self.prior(
scaled_model_input , timestep=t , proj_embedding=image_embeds , ).predicted_image_embedding
# remove the variance
noise_pred , _ = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance:
noise_pred_uncond , noise_pred = noise_pred.chunk(2 )
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
latents = self.scheduler.step(
noise_pred , timestep=t , sample=latents , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=latents )
images = []
for i, latent in enumerate(latents ):
image = self.renderer.decode(
latent[None, :] , device , size=frame_size , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(image )
images = torch.stack(images )
if output_type not in ["np", "pil"]:
raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}" )
images = images.cpu().numpy()
if output_type == "pil":
images = [self.numpy_to_pil(image ) for image in images]
# Offload last model to CPU
if hasattr(self , '''final_offload_hook''' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=images )
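The guidance step inside the denoising loop is plain classifier-free guidance. The same arithmetic in isolation (tensor shapes are illustrative):

import torch

guidance_scale = 3.0
noise_pred = torch.randn(2, 16, 32)                 # [uncond, cond] stacked on the batch axis
noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)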
| 2 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
pipeline_class = ShapEImg2ImgPipeline
params = ["""image"""]
batch_params = ["""image"""]
required_optional_params = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
test_gpu_offload = False
@property
def text_embedder_hidden_size(self ):
'''simple docstring'''
return 32
@property
def time_input_dim(self ):
'''simple docstring'''
return 32
@property
def time_embed_dim(self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def renderer_dim(self ):
'''simple docstring'''
return 8
@property
def dummy_image_encoder(self ):
'''simple docstring'''
torch.manual_seed(0 )
config = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
model = CLIPVisionModel(config )
return model
@property
def dummy_image_processor(self ):
'''simple docstring'''
image_processor = CLIPImageProcessor(
crop_size=224 , do_center_crop=True , do_normalize=True , do_resize=True , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , )
return image_processor
@property
def dummy_prior(self ):
'''simple docstring'''
torch.manual_seed(0 )
model_kwargs = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
model = PriorTransformer(**model_kwargs )
return model
@property
def dummy_renderer(self ):
'''simple docstring'''
torch.manual_seed(0 )
model_kwargs = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
model = ShapERenderer(**model_kwargs )
return model
def get_dummy_components(self ):
'''simple docstring'''
prior = self.dummy_prior
image_encoder = self.dummy_image_encoder
image_processor = self.dummy_image_processor
renderer = self.dummy_renderer
scheduler = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
components = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def get_dummy_inputs(self , device , seed=0 ):
'''simple docstring'''
input_image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
if str(device ).startswith('''mps''' ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
inputs = {
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def test_shap_e(self ):
'''simple docstring'''
device = '''cpu'''
components = self.get_dummy_components()
pipe = self.pipeline_class(**components )
pipe = pipe.to(device )
pipe.set_progress_bar_config(disable=None )
output = pipe(**self.get_dummy_inputs(device ) )
image = output.images[0]
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
expected_slice = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def test_inference_batch_consistent(self ):
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def test_inference_batch_single_identical(self ):
'''simple docstring'''
test_max_difference = torch_device == '''cpu'''
relax_max_difference = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )
def test_num_images_per_prompt(self ):
'''simple docstring'''
components = self.get_dummy_components()
pipe = self.pipeline_class(**components )
pipe = pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
batch_size = 1
num_images_per_prompt = 2
inputs = self.get_dummy_inputs(torch_device )
for key in inputs.keys():
if key in self.batch_params:
inputs[key] = batch_size * [inputs[key]]
images = pipe(**inputs , num_images_per_prompt=num_images_per_prompt )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase ):
'''simple docstring'''
def tearDown(self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_shap_e_img2img(self ):
'''simple docstring'''
input_image = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
expected_image = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
pipe = ShapEImg2ImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
pipe = pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
generator = torch.Generator(device=torch_device ).manual_seed(0 )
images = pipe(
input_image , generator=generator , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(images , expected_image )
| 2 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_efficientnet""": [
"""EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientNetConfig""",
"""EfficientNetOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""image_processing_efficientnet"""] = ["""EfficientNetImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_efficientnet"""] = [
"""EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientNetForImageClassification""",
"""EfficientNetModel""",
"""EfficientNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
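The `_LazyModule` registration above defers the heavy torch/vision imports until an attribute is first accessed. Observable behaviour, sketched (assumes the optional dependencies are installed):

import transformers

# importing transformers does not yet load the EfficientNet modeling code;
# the first attribute access below triggers the real import through _LazyModule
config = transformers.EfficientNetConfig()
model = transformers.EfficientNetModel(config)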
| 139 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester :
"""simple docstring"""
config_cls = MBartConfig
config_updates = {}
hidden_act = "gelu"
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
"""simple docstring"""
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
def prepare_config_and_inputs_for_common( self ):
"""simple docstring"""
input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
config = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
inputs_dict = prepare_mbart_inputs_dict(config , input_ids , decoder_input_ids )
return config, inputs_dict
def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
"""simple docstring"""
model = TFMBartModel(config=config ).get_decoder()
input_ids = inputs_dict["""input_ids"""]
input_ids = input_ids[:1, :]
attention_mask = inputs_dict["""attention_mask"""][:1, :]
head_mask = inputs_dict["""head_mask"""]
self.batch_size = 1
# first forward pass
outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
output , past_key_values = outputs.to_tuple()
past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
if attention_mask is None:
attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
decoder_attention_mask = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFMBartModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
pipeline_model_mapping = (
{
"conversational": TFMBartForConditionalGeneration,
"feature-extraction": TFMBartModel,
"summarization": TFMBartForConditionalGeneration,
"text2text-generation": TFMBartForConditionalGeneration,
"translation": TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
is_encoder_decoder = True
test_pruning = False
test_onnx = False
def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
"""simple docstring"""
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def setUp( self ):
"""simple docstring"""
self.model_tester = TFMBartModelTester(self )
self.config_tester = ConfigTester(self , config_class=MBartConfig )
def test_config( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def test_decoder_model_past_large_inputs( self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
src_text = [
" UN Chief Says There Is No Military Solution in Syria",
]
expected_text = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
]
model_name = "facebook/mbart-large-en-ro"
@cached_property
def tokenizer( self ):
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def model( self ):
"""simple docstring"""
model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
return model
def _assert_generated_batch_equal_expected( self , **tokenizer_kwargs ):
"""simple docstring"""
generated_words = self.translate_src_text(**tokenizer_kwargs )
self.assertListEqual(self.expected_text , generated_words )
def translate_src_text( self , **tokenizer_kwargs ):
"""simple docstring"""
model_inputs = self.tokenizer(self.src_text , **tokenizer_kwargs , return_tensors="""tf""" )
generated_ids = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
generated_words = self.tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
return generated_words
@slow
def test_batch_generation_en_ro( self ):
"""simple docstring"""
self._assert_generated_batch_equal_expected()
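The integration test pins an en→ro translation. The same flow as a standalone sketch (network access and TensorFlow assumed; the expected output is the test's pinned string):

from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-en-ro")
model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-en-ro")
inputs = tokenizer([" UN Chief Says There Is No Military Solution in Syria"], return_tensors="tf")
generated = model.generate(inputs.input_ids, attention_mask=inputs.attention_mask, num_beams=2)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
# -> ["Şeful ONU declară că nu există o soluţie militară în Siria"]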
| 139 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig ):
model_type = """unispeech-sat"""
def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , feat_quantizer_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1e-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(512, 512, 512, 512, 512, 512, 512) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , num_codevectors_per_group=320 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=100 , codevector_dim=256 , proj_codevector_dim=256 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , tdnn_dim=(512, 512, 512, 512, 1500) , tdnn_kernel=(5, 3, 3, 1, 1) , tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=512 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , num_clusters=504 , **kwargs , ):
'''simple docstring'''
super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
self.hidden_size = hidden_size
self.feat_extract_norm = feat_extract_norm
self.feat_extract_activation = feat_extract_activation
self.conv_dim = list(conv_dim )
self.conv_stride = list(conv_stride )
self.conv_kernel = list(conv_kernel )
self.conv_bias = conv_bias
self.num_conv_pos_embeddings = num_conv_pos_embeddings
self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
self.num_feat_extract_layers = len(self.conv_dim )
self.num_hidden_layers = num_hidden_layers
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.num_attention_heads = num_attention_heads
self.hidden_dropout = hidden_dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.feat_proj_dropout = feat_proj_dropout
self.final_dropout = final_dropout
self.layerdrop = layerdrop
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.vocab_size = vocab_size
self.num_clusters = num_clusters
self.do_stable_layer_norm = do_stable_layer_norm
self.use_weighted_layer_sum = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
self.apply_spec_augment = apply_spec_augment
self.mask_time_prob = mask_time_prob
self.mask_time_length = mask_time_length
self.mask_time_min_masks = mask_time_min_masks
self.mask_feature_prob = mask_feature_prob
self.mask_feature_length = mask_feature_length
self.mask_feature_min_masks = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
self.num_codevectors_per_group = num_codevectors_per_group
self.num_codevector_groups = num_codevector_groups
self.contrastive_logits_temperature = contrastive_logits_temperature
self.feat_quantizer_dropout = feat_quantizer_dropout
self.num_negatives = num_negatives
self.codevector_dim = codevector_dim
self.proj_codevector_dim = proj_codevector_dim
self.diversity_loss_weight = diversity_loss_weight
# ctc loss
self.ctc_loss_reduction = ctc_loss_reduction
self.ctc_zero_infinity = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
self.classifier_proj_size = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
self.tdnn_dim = list(tdnn_dim )
self.tdnn_kernel = list(tdnn_kernel )
self.tdnn_dilation = list(tdnn_dilation )
self.xvector_output_dim = xvector_output_dim
@property
def inputs_to_logits_ratio( self ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
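With the default `conv_stride=(5, 2, 2, 2, 2, 2, 2)` the property above evaluates to 5 · 2^6 = 320: one encoder frame per 320 input samples, i.e. 20 ms of audio at 16 kHz. Quick check (illustrative):

from transformers import UniSpeechSatConfig

config = UniSpeechSatConfig()
assert config.inputs_to_logits_ratio == 320
print(config.inputs_to_logits_ratio / 16_000 * 1_000, "ms per frame")  # 20.0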
| 275 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
'''simple docstring'''
def __init__( self , parent , out_indices=None , stage_names=None , out_features=None , backbone="resnet50" , batch_size=3 , image_size=32 , num_channels=3 , use_pretrained_backbone=True , is_training=True , ):
self.parent = parent
self.out_indices = out_indices if out_indices is not None else [4]
self.stage_names = stage_names
self.out_features = out_features
self.backbone = backbone
self.batch_size = batch_size
self.image_size = image_size
self.num_channels = num_channels
self.use_pretrained_backbone = use_pretrained_backbone
self.is_training = is_training
def prepare_config_and_inputs( self ):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
config = self.get_config()
return config, pixel_values
def get_config( self ):
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def create_and_check_model( self , config , pixel_values ):
model = TimmBackbone(config=config )
model.to(torch_device )
model.eval()
with torch.no_grad():
result = model(pixel_values )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def prepare_config_and_inputs_for_common( self ):
config_and_inputs = self.prepare_config_and_inputs()
config , pixel_values = config_and_inputs
inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class lowercase__ ( _snake_case , _snake_case , _snake_case , unittest.TestCase ):
'''simple docstring'''
A_ : Optional[Any] = (TimmBackbone,) if is_torch_available() else ()
A_ : Tuple = {"""feature-extraction""": TimmBackbone} if is_torch_available() else {}
A_ : Optional[Any] = False
A_ : List[Any] = False
A_ : Dict = False
A_ : int = False
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Any = TimmBackboneModelTester(self )
_SCREAMING_SNAKE_CASE : int = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case )
def UpperCAmelCase_ ( self ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Optional[int] = """resnet18"""
_SCREAMING_SNAKE_CASE : Tuple = """microsoft/resnet-18"""
_SCREAMING_SNAKE_CASE : List[str] = AutoBackbone.from_pretrained(__snake_case , use_timm_backbone=__snake_case )
_SCREAMING_SNAKE_CASE : Tuple = AutoBackbone.from_pretrained(__snake_case )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
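# Illustration (assuming resnet18 exposes stage names like
# ("stem", "stage1", "stage2", "stage3", "stage4")): the timm out_indices (-1,)
# and the transformers out_indices [len(stage_names) - 1] == [4] both resolve
# to the final stage, so the two assertions above compare the same layer.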
_SCREAMING_SNAKE_CASE : Optional[Any] = AutoBackbone.from_pretrained(__snake_case , use_timm_backbone=__snake_case , out_indices=[1, 2, 3] )
_SCREAMING_SNAKE_CASE : Union[str, Any] = AutoBackbone.from_pretrained(__snake_case , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("""TimmBackbone doesn't support feed forward chunking""" )
def UpperCAmelCase_ ( self ):
pass
@unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" )
def UpperCAmelCase_ ( self ):
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
def UpperCAmelCase_ ( self ):
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def UpperCAmelCase_ ( self ):
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def UpperCAmelCase_ ( self ):
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
def UpperCAmelCase_ ( self ):
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def UpperCAmelCase_ ( self ):
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def UpperCAmelCase_ ( self ):
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def UpperCAmelCase_ ( self ):
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def UpperCAmelCase_ ( self ):
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def UpperCAmelCase_ ( self ):
pass
@unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" )
def UpperCAmelCase_ ( self ):
pass
@unittest.skip("""TimmBackbone doesn't support output_attentions.""" )
def UpperCAmelCase_ ( self ):
pass
@unittest.skip("""Safetensors is not supported by timm.""" )
def UpperCAmelCase_ ( self ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase_ ( self ):
pass
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : List[str] = model_class(__snake_case )
_SCREAMING_SNAKE_CASE : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE : int = [*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __snake_case )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE : Tuple = True
_SCREAMING_SNAKE_CASE : List[str] = self.has_attentions
# no need to test all models as different heads yield the same functionality
_SCREAMING_SNAKE_CASE : str = self.all_model_classes[0]
_SCREAMING_SNAKE_CASE : str = model_class(__snake_case )
model.to(__snake_case )
_SCREAMING_SNAKE_CASE : Tuple = self._prepare_for_class(__snake_case , __snake_case )
_SCREAMING_SNAKE_CASE : Tuple = model(**__snake_case )
_SCREAMING_SNAKE_CASE : Optional[Any] = outputs[0][-1]
# Encoder-/Decoder-only models
_SCREAMING_SNAKE_CASE : str = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
_SCREAMING_SNAKE_CASE : Optional[int] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__snake_case )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : str = model_class(__snake_case )
model.to(__snake_case )
model.eval()
_SCREAMING_SNAKE_CASE : List[str] = model(**__snake_case )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
_SCREAMING_SNAKE_CASE : Union[str, Any] = copy.deepcopy(__snake_case )
_SCREAMING_SNAKE_CASE : Optional[Any] = None
_SCREAMING_SNAKE_CASE : Tuple = model_class(__snake_case )
model.to(__snake_case )
model.eval()
_SCREAMING_SNAKE_CASE : Optional[Any] = model(**__snake_case )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
_SCREAMING_SNAKE_CASE : str = copy.deepcopy(__snake_case )
_SCREAMING_SNAKE_CASE : Tuple = False
_SCREAMING_SNAKE_CASE : Optional[int] = model_class(__snake_case )
model.to(__snake_case )
model.eval()
_SCREAMING_SNAKE_CASE : List[Any] = model(**__snake_case )
| 200 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowerCamelCase : Any = logging.get_logger(__name__)
_lowerCamelCase : Optional[int] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_lowerCamelCase : Optional[int] = {
'''tokenizer_file''': {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
},
}
_lowerCamelCase : Any = {
'''gpt-neox-20b''': 20_48,
}
class lowercase ( a ):
lowercase__ : Any = VOCAB_FILES_NAMES
lowercase__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Optional[int] = ["""input_ids""", """attention_mask"""]
def __init__( self : List[str] , _UpperCamelCase : int=None , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : str=None , _UpperCamelCase : Union[str, Any]="<|endoftext|>" , _UpperCamelCase : Optional[Any]="<|endoftext|>" , _UpperCamelCase : Union[str, Any]="<|endoftext|>" , _UpperCamelCase : Any=False , **_UpperCamelCase : str , ) -> Optional[int]:
'''simple docstring'''
super().__init__(
_UpperCamelCase , _UpperCamelCase , tokenizer_file=_UpperCamelCase , unk_token=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , add_prefix_space=_UpperCamelCase , **_UpperCamelCase , )
SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , _UpperCamelCase ) != add_prefix_space:
SCREAMING_SNAKE_CASE = getattr(_UpperCamelCase , pre_tok_state.pop("type" ) )
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = pre_tok_class(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = add_prefix_space
def __snake_case( self : str , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase )
return tuple(_UpperCamelCase )
def __snake_case( self : List[Any] , _UpperCamelCase : "Conversation" ) -> List[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase ) + [self.eos_token_id] )
if len(_UpperCamelCase ) > self.model_max_length:
SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :]
return input_ids
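# Usage sketch (hypothetical turns, not from the original file): for a
# conversation with turns ["Hi", "Hello!"], the method builds
# encode("Hi") + [eos_token_id] + encode("Hello!") + [eos_token_id]; if the
# result exceeds model_max_length (2048 for gpt-neox-20b above), only the
# trailing model_max_length ids are kept.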
| 206 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCamelCase : str = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_lowerCamelCase : int = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
_lowerCamelCase : List[Any] = {
'''allenai/led-base-16384''': 1_63_84,
}
class lowercase ( a ):
lowercase__ : Tuple = VOCAB_FILES_NAMES
lowercase__ : Any = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Optional[Any] = LEDTokenizer
lowercase__ : str = ["""input_ids""", """attention_mask"""]
def __init__( self : Union[str, Any] , _UpperCamelCase : Tuple=None , _UpperCamelCase : str=None , _UpperCamelCase : Tuple=None , _UpperCamelCase : List[str]="replace" , _UpperCamelCase : str="<s>" , _UpperCamelCase : List[Any]="</s>" , _UpperCamelCase : List[Any]="</s>" , _UpperCamelCase : List[str]="<s>" , _UpperCamelCase : Tuple="<unk>" , _UpperCamelCase : List[Any]="<pad>" , _UpperCamelCase : Tuple="<mask>" , _UpperCamelCase : List[str]=False , _UpperCamelCase : List[Any]=True , **_UpperCamelCase : Optional[Any] , ) -> Tuple:
'''simple docstring'''
super().__init__(
_UpperCamelCase , _UpperCamelCase , tokenizer_file=_UpperCamelCase , errors=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , sep_token=_UpperCamelCase , cls_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , mask_token=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase , **_UpperCamelCase , )
SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , _UpperCamelCase ) != add_prefix_space:
SCREAMING_SNAKE_CASE = getattr(_UpperCamelCase , pre_tok_state.pop("type" ) )
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = pre_tok_class(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE = "post_processor"
SCREAMING_SNAKE_CASE = getattr(self.backend_tokenizer , _UpperCamelCase , _UpperCamelCase )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE = tuple(state["sep"] )
if "cls" in state:
SCREAMING_SNAKE_CASE = tuple(state["cls"] )
SCREAMING_SNAKE_CASE = False
if state.get("add_prefix_space" , _UpperCamelCase ) != add_prefix_space:
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = True
if state.get("trim_offsets" , _UpperCamelCase ) != trim_offsets:
SCREAMING_SNAKE_CASE = trim_offsets
SCREAMING_SNAKE_CASE = True
if changes_to_apply:
SCREAMING_SNAKE_CASE = getattr(_UpperCamelCase , state.pop("type" ) )
SCREAMING_SNAKE_CASE = component_class(**_UpperCamelCase )
setattr(self.backend_tokenizer , _UpperCamelCase , _UpperCamelCase )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def __snake_case( self : int ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def __snake_case( self : Optional[int] , _UpperCamelCase : Optional[int] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else value
SCREAMING_SNAKE_CASE = value
def __snake_case( self : List[Any] , *_UpperCamelCase : Optional[int] , **_UpperCamelCase : Union[str, Any] ) -> BatchEncoding:
'''simple docstring'''
SCREAMING_SNAKE_CASE = kwargs.get("is_split_into_words" , _UpperCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*_UpperCamelCase , **_UpperCamelCase )
def __snake_case( self : int , *_UpperCamelCase : Dict , **_UpperCamelCase : Tuple ) -> BatchEncoding:
'''simple docstring'''
SCREAMING_SNAKE_CASE = kwargs.get("is_split_into_words" , _UpperCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs." )
return super()._encode_plus(*_UpperCamelCase , **_UpperCamelCase )
def __snake_case( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase )
return tuple(_UpperCamelCase )
def __snake_case( self : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : int=None ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
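# Resulting pattern (standard BART/LED convention):
# single sequence:   <s> token_ids_a </s>
# pair of sequences: <s> token_ids_a </s> </s> token_ids_b </s>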
def __snake_case( self : Dict , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __snake_case( self : Optional[Any] , _UpperCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[bool] = None , ) -> dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = super()._pad(
encoded_inputs=_UpperCamelCase , max_length=_UpperCamelCase , padding_strategy=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_attention_mask=_UpperCamelCase , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` needs to have the same length as the other (sequential) inputs.
SCREAMING_SNAKE_CASE = len(encoded_inputs["global_attention_mask"] ) != len(_UpperCamelCase )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE = len(_UpperCamelCase ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
SCREAMING_SNAKE_CASE = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
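# Worked example (hypothetical values): with padding_side == "right", a
# global_attention_mask of [0, 1, 0] padded to an input length of 5 becomes
# [0, 1, 0, -1, -1]; the -1 filler marks padded positions so they are neither
# local attention (0) nor global attention (1).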
| 206 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[str], UpperCAmelCase__ : Tuple, ):
__lowercase = parent
__lowercase = 1_3
__lowercase = 7
__lowercase = True
__lowercase = True
__lowercase = False
__lowercase = True
__lowercase = 9_9
__lowercase = 3_2
__lowercase = 2
__lowercase = 4
__lowercase = 3_7
__lowercase = "gelu"
__lowercase = 0.1
__lowercase = 0.1
__lowercase = 5_1_2
__lowercase = 1_6
__lowercase = 2
__lowercase = 0.02
__lowercase = 3
__lowercase = 4
__lowercase = None
def _lowercase ( self : int ):
__lowercase = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size], self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
__lowercase = ids_tensor([self.batch_size], self.num_choices )
__lowercase = DistilBertConfig(
vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : str, UpperCAmelCase__ : Dict, UpperCAmelCase__ : int, UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any, UpperCAmelCase__ : Optional[int] ):
__lowercase = TFDistilBertModel(config=UpperCAmelCase__ )
__lowercase = {"input_ids": input_ids, "attention_mask": input_mask}
__lowercase = model(UpperCAmelCase__ )
__lowercase = [input_ids, input_mask]
__lowercase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : Tuple, UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Any, UpperCAmelCase__ : Tuple, UpperCAmelCase__ : Any, UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : List[str] ):
__lowercase = TFDistilBertForMaskedLM(config=UpperCAmelCase__ )
__lowercase = {"input_ids": input_ids, "attention_mask": input_mask}
__lowercase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self : int, UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : List[str], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any, UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any ):
__lowercase = TFDistilBertForQuestionAnswering(config=UpperCAmelCase__ )
__lowercase = {
"input_ids": input_ids,
"attention_mask": input_mask,
}
__lowercase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def _lowercase ( self : Any, UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Any, UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : List[Any], UpperCAmelCase__ : List[str], UpperCAmelCase__ : Any ):
__lowercase = self.num_labels
__lowercase = TFDistilBertForSequenceClassification(UpperCAmelCase__ )
__lowercase = {"input_ids": input_ids, "attention_mask": input_mask}
__lowercase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def _lowercase ( self : Optional[int], UpperCAmelCase__ : str, UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Dict, UpperCAmelCase__ : int, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Tuple ):
__lowercase = self.num_choices
__lowercase = TFDistilBertForMultipleChoice(UpperCAmelCase__ )
__lowercase = tf.tile(tf.expand_dims(UpperCAmelCase__, 1 ), (1, self.num_choices, 1) )
__lowercase = tf.tile(tf.expand_dims(UpperCAmelCase__, 1 ), (1, self.num_choices, 1) )
__lowercase = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
}
__lowercase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def _lowercase ( self : str, UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : str, UpperCAmelCase__ : Any, UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : int, UpperCAmelCase__ : List[Any] ):
__lowercase = self.num_labels
__lowercase = TFDistilBertForTokenClassification(UpperCAmelCase__ )
__lowercase = {"input_ids": input_ids, "attention_mask": input_mask}
__lowercase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self : Dict ):
__lowercase = self.prepare_config_and_inputs()
((__lowercase) ,(__lowercase) ,(__lowercase) ,(__lowercase) ,(__lowercase) ,(__lowercase)) = config_and_inputs
__lowercase = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _lowerCAmelCase ( lowercase ,lowercase ,unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : int = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
__UpperCAmelCase : Union[str, Any] = (
{
"feature-extraction": TFDistilBertModel,
"fill-mask": TFDistilBertForMaskedLM,
"question-answering": TFDistilBertForQuestionAnswering,
"text-classification": TFDistilBertForSequenceClassification,
"token-classification": TFDistilBertForTokenClassification,
"zero-shot": TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__UpperCAmelCase : str = False
__UpperCAmelCase : List[str] = False
def _lowercase ( self : Optional[int] ):
__lowercase = TFDistilBertModelTester(self )
__lowercase = ConfigTester(self, config_class=UpperCAmelCase__, dim=3_7 )
def _lowercase ( self : str ):
self.config_tester.run_common_tests()
def _lowercase ( self : Optional[int] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*UpperCAmelCase__ )
def _lowercase ( self : Dict ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCAmelCase__ )
def _lowercase ( self : List[Any] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCAmelCase__ )
def _lowercase ( self : Dict ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCAmelCase__ )
def _lowercase ( self : List[str] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCAmelCase__ )
def _lowercase ( self : List[str] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCAmelCase__ )
@slow
def _lowercase ( self : str ):
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
__lowercase = TFDistilBertModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowercase ( self : Union[str, Any] ):
__lowercase = TFDistilBertModel.from_pretrained("distilbert-base-uncased" )
__lowercase = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowercase = model(UpperCAmelCase__ )[0]
__lowercase = [1, 6, 7_6_8]
self.assertEqual(output.shape, UpperCAmelCase__ )
__lowercase = tf.constant(
[
[
[0.19_261_885, -0.13_732_955, 0.4_119_799],
[0.22_150_156, -0.07_422_661, 0.39_037_204],
[0.22_756_018, -0.0_896_414, 0.3_701_467],
]
] )
tf.debugging.assert_near(output[:, :3, :3], UpperCAmelCase__, atol=1E-4 )
| 17 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
def lowerCAmelCase_ ( __a ) -> YolosConfig:
"""simple docstring"""
lowerCamelCase__: str =YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowerCamelCase__: int =192
lowerCamelCase__: Optional[int] =768
lowerCamelCase__: Any =12
lowerCamelCase__: str =3
lowerCamelCase__: Optional[int] =[800, 1333]
lowerCamelCase__: Union[str, Any] =False
elif yolos_name == "yolos_s_dWr":
lowerCamelCase__: int =330
lowerCamelCase__: Optional[Any] =14
lowerCamelCase__: Any =6
lowerCamelCase__: List[str] =1320
elif "yolos_s" in yolos_name:
lowerCamelCase__: List[str] =384
lowerCamelCase__: Union[str, Any] =1536
lowerCamelCase__: List[Any] =12
lowerCamelCase__: Any =6
elif "yolos_b" in yolos_name:
lowerCamelCase__: str =[800, 1344]
lowerCamelCase__: int =91
lowerCamelCase__: str ="huggingface/label-files"
lowerCamelCase__: List[str] ="coco-detection-id2label.json"
lowerCamelCase__: Tuple =json.load(open(hf_hub_download(__a , __a , repo_type="dataset" ) , "r" ) )
lowerCamelCase__: Dict ={int(k ): v for k, v in idalabel.items()}
lowerCamelCase__: List[str] =idalabel
lowerCamelCase__: int ={v: k for k, v in idalabel.items()}
return config
def lowerCAmelCase_ ( __a , __a , __a = False ) -> Dict:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase__: Optional[int] =state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
lowerCamelCase__: Dict =state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__: Union[str, Any] =in_proj_weight[: config.hidden_size, :]
lowerCamelCase__: str =in_proj_bias[: config.hidden_size]
lowerCamelCase__: str =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase__: str =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase__: Optional[int] =in_proj_weight[-config.hidden_size :, :]
lowerCamelCase__: List[Any] =in_proj_bias[-config.hidden_size :]
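# Illustration, assuming hidden_size == 384 (the "yolos_s" setting above): the
# fused qkv weight has shape (3 * 384, 384) = (1152, 384), and the three slices
# above yield query, key and value matrices of shape (384, 384) each, with the
# (1152,) bias split the same way.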
def lowerCAmelCase_ ( __a ) -> str:
"""simple docstring"""
if "backbone" in name:
lowerCamelCase__: Optional[Any] =name.replace("backbone" , "vit" )
if "cls_token" in name:
lowerCamelCase__: Optional[int] =name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
lowerCamelCase__: str =name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
lowerCamelCase__: Tuple =name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
lowerCamelCase__: Any =name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
lowerCamelCase__: List[Any] =name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
lowerCamelCase__: Union[str, Any] =name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
lowerCamelCase__: Any =name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
lowerCamelCase__: Optional[int] =name.replace("attn" , "attention.self" )
if "norm1" in name:
lowerCamelCase__: int =name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowerCamelCase__: int =name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowerCamelCase__: List[str] =name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowerCamelCase__: Any =name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
lowerCamelCase__: Dict =name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
lowerCamelCase__: List[str] =name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
lowerCamelCase__: Any =name.replace("vit.norm" , "vit.layernorm" )
return name
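# Illustration (hypothetical key): "blocks.0.attn.proj.weight" passes through
# the rules above as blocks -> encoder.layer, then attn.proj ->
# attention.output.dense, yielding "encoder.layer.0.attention.output.dense.weight".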
def lowerCAmelCase_ ( __a , __a ) -> dict:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowerCamelCase__: Any =orig_state_dict.pop(__a )
if "qkv" in key:
lowerCamelCase__: Tuple =key.split("." )
lowerCamelCase__: List[str] =int(key_split[2] )
lowerCamelCase__: Tuple =model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
lowerCamelCase__: int =val[:dim, :]
lowerCamelCase__: str =val[
dim : dim * 2, :
]
lowerCamelCase__: Any =val[-dim:, :]
else:
lowerCamelCase__: Tuple =val[:dim]
lowerCamelCase__: Optional[Any] =val[dim : dim * 2]
lowerCamelCase__: str =val[-dim:]
else:
lowerCamelCase__: Dict =val
return orig_state_dict
def lowerCAmelCase_ ( ) -> torch.Tensor:
"""simple docstring"""
lowerCamelCase__: Any ="http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase__: Optional[Any] =Image.open(requests.get(__a , stream=__a ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_ ( __a , __a , __a , __a = False ) -> List[str]:
"""simple docstring"""
lowerCamelCase__: int =get_yolos_config(__a )
# load original state_dict
lowerCamelCase__: Optional[int] =torch.load(__a , map_location="cpu" )["model"]
# load 🤗 model
lowerCamelCase__: int =YolosForObjectDetection(__a )
model.eval()
lowerCamelCase__: Union[str, Any] =convert_state_dict(__a , __a )
model.load_state_dict(__a )
# Check outputs on an image, prepared by YolosImageProcessor
lowerCamelCase__: Any =800 if yolos_name != "yolos_ti" else 512
lowerCamelCase__: Tuple =YolosImageProcessor(format="coco_detection" , size=__a )
lowerCamelCase__: str =image_processor(images=prepare_img() , return_tensors="pt" )
lowerCamelCase__: Tuple =model(**__a )
lowerCamelCase__ , lowerCamelCase__: List[str] =outputs.logits, outputs.pred_boxes
lowerCamelCase__ , lowerCamelCase__: Any =None, None
if yolos_name == "yolos_ti":
lowerCamelCase__: Optional[Any] =torch.tensor(
[[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] )
lowerCamelCase__: List[Any] =torch.tensor(
[[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] )
elif yolos_name == "yolos_s_200_pre":
lowerCamelCase__: Optional[int] =torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] )
lowerCamelCase__: Any =torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] )
elif yolos_name == "yolos_s_300_pre":
lowerCamelCase__: str =torch.tensor(
[[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] )
lowerCamelCase__: Optional[Any] =torch.tensor(
[[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] )
elif yolos_name == "yolos_s_dWr":
lowerCamelCase__: str =torch.tensor(
[[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] )
lowerCamelCase__: Union[str, Any] =torch.tensor(
[[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] )
elif yolos_name == "yolos_base":
lowerCamelCase__: Tuple =torch.tensor(
[[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] )
lowerCamelCase__: Optional[int] =torch.tensor(
[[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] )
else:
raise ValueError(F"""Unknown yolos_name: {yolos_name}""" )
assert torch.allclose(logits[0, :3, :3] , __a , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , __a , atol=1e-4 )
Path(__a ).mkdir(exist_ok=__a )
print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__a )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__a )
if push_to_hub:
lowerCamelCase__: Any ={
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
lowerCamelCase__: Optional[int] =model_mapping[yolos_name]
image_processor.push_to_hub(__a , organization="hustvl" )
model.push_to_hub(__a , organization="hustvl" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__A = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 10 | 0 |
import os
from collections.abc import Iterator
def lowerCamelCase_ ( _UpperCamelCase = "." ) -> Iterator[str]:
"""simple docstring"""
for dir_path, dir_names, filenames in os.walk(_UpperCamelCase ):
dir_names[:] = [d for d in dir_names if d != '''scripts''' and d[0] not in '''._'''] # prune in place so os.walk skips these directories
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(_UpperCamelCase )[1] in (".py", ".ipynb"):
yield os.path.join(_UpperCamelCase , _UpperCamelCase ).lstrip('''./''' )
def lowerCamelCase_ ( _UpperCamelCase ) -> Any:
"""simple docstring"""
return f'''{i * " "}*''' if i else "\n##"
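# Examples: md_prefix(0) -> "\n##" (a new section heading); for i > 0 it
# returns i copies of the indent string followed by "*", e.g. md_prefix(2)
# -> "  *" with the single-space indent as written above.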
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> str:
"""simple docstring"""
snake_case_ : Tuple = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(_UpperCamelCase ) or old_parts[i] != new_part) and new_part:
print(f'''{md_prefix(_UpperCamelCase )} {new_part.replace("_" , " " ).title()}''' )
return new_path
def lowerCamelCase_ ( _UpperCamelCase = "." ) -> None:
"""simple docstring"""
snake_case_ : Optional[int] = ''''''
for filepath in sorted(good_file_paths(_UpperCamelCase ) ):
snake_case_ : Tuple = os.path.split(_UpperCamelCase )
if filepath != old_path:
snake_case_ : List[Any] = print_path(_UpperCamelCase , _UpperCamelCase )
snake_case_ : Any = (filepath.count(os.sep ) + 1) if filepath else 0
snake_case_ : Union[str, Any] = f'''{filepath}/{filename}'''.replace(''' ''' , '''%20''' )
snake_case_ : List[Any] = os.path.splitext(filename.replace('''_''' , ''' ''' ).title() )[0]
print(f'''{md_prefix(_UpperCamelCase )} [{filename}]({url})''' )
if __name__ == "__main__":
print_directory_md('''.''')
| 369 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
lowerCAmelCase_ = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
lowerCAmelCase_ = {
'''facebook/blenderbot_small-90M''': 5_1_2,
}
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : int = VOCAB_FILES_NAMES
lowerCamelCase_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ : Optional[Any] = BlenderbotSmallTokenizer
def __init__(self , __magic_name__=None , __magic_name__=None , __magic_name__="<|endoftext|>" , __magic_name__="<|endoftext|>" , __magic_name__="<|endoftext|>" , __magic_name__=False , __magic_name__=True , **__magic_name__ , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
ByteLevelBPETokenizer(
vocab=__magic_name__ , merges=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ , ) , bos_token=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , **__magic_name__ , )
snake_case_ : Optional[int] = add_prefix_space
def lowerCamelCase (self , __magic_name__ , __magic_name__=None ) -> Any:
'''simple docstring'''
snake_case_ : Optional[int] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase (self , __magic_name__ , __magic_name__ = None ) -> List[int]:
'''simple docstring'''
snake_case_ : List[str] = [self.sep_token_id]
snake_case_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 279 | 0 |
"""simple docstring"""
_a = [
'Audio',
'Array2D',
'Array3D',
'Array4D',
'Array5D',
'ClassLabel',
'Features',
'Sequence',
'Value',
'Image',
'Translation',
'TranslationVariableLanguages',
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 61 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any]=3 , SCREAMING_SNAKE_CASE_ : Any=3_2 , SCREAMING_SNAKE_CASE_ : int=3 , SCREAMING_SNAKE_CASE_ : Optional[Any]=1_0 , SCREAMING_SNAKE_CASE_ : Dict=[1_0, 2_0, 3_0, 4_0] , SCREAMING_SNAKE_CASE_ : int=[1, 1, 2, 1] , SCREAMING_SNAKE_CASE_ : int=True , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : int="relu" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE_ : List[str]=None , ):
lowerCAmelCase_ : Optional[int] = parent
lowerCAmelCase_ : Union[str, Any] = batch_size
lowerCAmelCase_ : Optional[int] = image_size
lowerCAmelCase_ : Optional[int] = num_channels
lowerCAmelCase_ : Any = embeddings_size
lowerCAmelCase_ : Dict = hidden_sizes
lowerCAmelCase_ : Any = depths
lowerCAmelCase_ : Optional[int] = is_training
lowerCAmelCase_ : int = use_labels
lowerCAmelCase_ : List[Any] = hidden_act
lowerCAmelCase_ : Dict = num_labels
lowerCAmelCase_ : Optional[int] = scope
lowerCAmelCase_ : Tuple = len(SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
lowerCAmelCase_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ : Union[str, Any] = None
if self.use_labels:
lowerCAmelCase_ : int = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase_ : List[str] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self : str ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int ):
lowerCAmelCase_ : List[Any] = TFRegNetModel(config=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = model(SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
lowerCAmelCase_ : int = self.num_labels
lowerCAmelCase_ : int = TFRegNetForImageClassification(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Dict = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
lowerCAmelCase_ : str = self.prepare_config_and_inputs()
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ : Tuple = config_and_inputs
lowerCAmelCase_ : Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class UpperCamelCase__ ( lowercase_, lowercase_, unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
_SCREAMING_SNAKE_CASE = (
{"""feature-extraction""": TFRegNetModel, """image-classification""": TFRegNetForImageClassification}
if is_tf_available()
else {}
)
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowerCAmelCase_ : List[Any] = TFRegNetModelTester(self )
lowerCAmelCase_ : Tuple = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
return
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
super().test_keras_fit()
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
pass
def SCREAMING_SNAKE_CASE__ ( self : int ):
lowerCAmelCase_ ,lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : int = model_class(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Dict = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : List[Any] = [*signature.parameters.keys()]
lowerCAmelCase_ : Any = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
def check_hidden_states_output(SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Tuple ):
lowerCAmelCase_ : List[Any] = model_class(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Union[str, Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , training=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase_ : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
lowerCAmelCase_ ,lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : Optional[int] = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCAmelCase_ : str = layer_type
lowerCAmelCase_ : List[str] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase_ : List[str] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
lowerCAmelCase_ ,lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str={} ):
lowerCAmelCase_ : Optional[Any] = model(SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = model(SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).to_tuple()
def recursive_check(SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ):
if isinstance(SCREAMING_SNAKE_CASE_ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) , msg=(
'Tuple and dict output are not equal. Difference:'
F" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"
) , )
recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for model_class in self.all_model_classes:
lowerCAmelCase_ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Union[str, Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : str = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Tuple = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : int = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , {'output_hidden_states': True} )
lowerCAmelCase_ : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Any = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , {'output_hidden_states': True} )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : int = TFRegNetModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase_ ( ) -> List[str]:
"""simple docstring"""
lowerCAmelCase_ : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowerCAmelCase_ : int = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowerCAmelCase_ : Tuple = self.default_image_processor
lowerCAmelCase_ : Dict = prepare_img()
lowerCAmelCase_ : Union[str, Any] = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='tf' )
# forward pass
lowerCAmelCase_ : List[str] = model(**SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
# verify the logits
lowerCAmelCase_ : Optional[Any] = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = tf.constant([-0.41_80, -1.50_51, -3.48_36] )
tf.debugging.assert_near(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 )
| 224 | 0 |
'''simple docstring'''
from math import asin, atan, cos, radians, sin, sqrt, tan
_A : List[str] =637_8137.0
_A : Dict =635_6752.31_4245
_A : int =6_378_137
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> float:
lowerCamelCase__ : str = (AXIS_A - AXIS_B) / AXIS_A
lowerCamelCase__ : Dict = atan((1 - flattening) * tan(radians(UpperCamelCase ) ) )
lowerCamelCase__ : Dict = atan((1 - flattening) * tan(radians(UpperCamelCase ) ) )
lowerCamelCase__ : Optional[Any] = radians(UpperCamelCase )
lowerCamelCase__ : List[Any] = radians(UpperCamelCase )
# Equation
lowerCamelCase__ : Tuple = sin((phi_a - phi_a) / 2 )
lowerCamelCase__ : List[Any] = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
lowerCamelCase__ : Tuple = sqrt(sin_sq_phi + (cos(UpperCamelCase ) * cos(UpperCamelCase ) * sin_sq_lambda) )
return 2 * RADIUS * asin(UpperCamelCase )
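# Rough sanity check (approximate figures, not from the original file): for
# San Francisco (37.77, -122.42) and New York (40.71, -74.01) this returns on
# the order of 4.1e6 meters, i.e. roughly 4,100 km.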
if __name__ == "__main__":
import doctest
doctest.testmod()
| 129 |
'''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
_A : str ={
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 1_000,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
_A : Union[str, Any] ={
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 1_000,
'''block_out_channels''': [192, 192 * 2, 192 * 3, 192 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
_A : Dict ={
'''sample_size''': 256,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
_A : Dict ={
'''num_train_timesteps''': 40,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
_A : str ={
'''num_train_timesteps''': 201,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
_A : int ={
'''num_train_timesteps''': 151,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Dict:
if isinstance(UpperCamelCase , UpperCamelCase ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("""boolean value expected""" )
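# Usage sketch (hypothetical flag name): a str-to-bool helper like this is
# typically wired into argparse as
# parser.add_argument("--class_cond", type=<this function>, default=False),
# so "--class_cond yes" parses to True and "--class_cond 0" to False.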
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=False ) -> Any:
lowerCamelCase__ : Any = checkpoint[f'''{old_prefix}.in_layers.0.weight''']
lowerCamelCase__ : int = checkpoint[f'''{old_prefix}.in_layers.0.bias''']
lowerCamelCase__ : Any = checkpoint[f'''{old_prefix}.in_layers.2.weight''']
lowerCamelCase__ : Any = checkpoint[f'''{old_prefix}.in_layers.2.bias''']
lowerCamelCase__ : Optional[Any] = checkpoint[f'''{old_prefix}.emb_layers.1.weight''']
lowerCamelCase__ : Optional[int] = checkpoint[f'''{old_prefix}.emb_layers.1.bias''']
lowerCamelCase__ : Dict = checkpoint[f'''{old_prefix}.out_layers.0.weight''']
lowerCamelCase__ : Tuple = checkpoint[f'''{old_prefix}.out_layers.0.bias''']
lowerCamelCase__ : str = checkpoint[f'''{old_prefix}.out_layers.3.weight''']
lowerCamelCase__ : int = checkpoint[f'''{old_prefix}.out_layers.3.bias''']
if has_skip:
lowerCamelCase__ : Tuple = checkpoint[f'''{old_prefix}.skip_connection.weight''']
lowerCamelCase__ : List[Any] = checkpoint[f'''{old_prefix}.skip_connection.bias''']
return new_checkpoint
def convert_attention(checkpoint , new_checkpoint , old_prefix , new_prefix , attention_dim=None ) -> dict:
    weight_q , weight_k , weight_v = checkpoint[f'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 )
    bias_q , bias_k , bias_v = checkpoint[f'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 )
    new_checkpoint[f'''{new_prefix}.group_norm.weight'''] = checkpoint[f'''{old_prefix}.norm.weight''']
    new_checkpoint[f'''{new_prefix}.group_norm.bias'''] = checkpoint[f'''{old_prefix}.norm.bias''']
    new_checkpoint[f'''{new_prefix}.to_q.weight'''] = weight_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f'''{new_prefix}.to_q.bias'''] = bias_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f'''{new_prefix}.to_k.weight'''] = weight_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f'''{new_prefix}.to_k.bias'''] = bias_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f'''{new_prefix}.to_v.weight'''] = weight_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f'''{new_prefix}.to_v.bias'''] = bias_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[f'''{new_prefix}.to_out.0.weight'''] = (
        checkpoint[f'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 )
    )
    new_checkpoint[f'''{new_prefix}.to_out.0.bias'''] = checkpoint[f'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 )
    return new_checkpoint
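# Minimal sketch of the qkv split performed above, assuming a fused 1x1-conv
# projection of shape (3 * C, C, 1, 1): chunk(3, dim=0) yields the q/k/v thirds,
# and the double squeeze(-1) turns each into a plain (C, C) linear weight.
#   fused = torch.randn(3 * 64, 64, 1, 1)
#   weight_q, weight_k, weight_v = fused.chunk(3, dim=0)
#   assert weight_q.squeeze(-1).squeeze(-1).shape == (64, 64)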
def con_pt_to_diffuser(checkpoint_path , unet_config ) -> dict:
    checkpoint = torch.load(checkpoint_path , map_location="""cpu""" )
    new_checkpoint = {}
    new_checkpoint["""time_embedding.linear_1.weight"""] = checkpoint["""time_embed.0.weight"""]
    new_checkpoint["""time_embedding.linear_1.bias"""] = checkpoint["""time_embed.0.bias"""]
    new_checkpoint["""time_embedding.linear_2.weight"""] = checkpoint["""time_embed.2.weight"""]
    new_checkpoint["""time_embedding.linear_2.bias"""] = checkpoint["""time_embed.2.bias"""]
    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["""class_embedding.weight"""] = checkpoint["""label_emb.weight"""]
    new_checkpoint["""conv_in.weight"""] = checkpoint["""input_blocks.0.0.weight"""]
    new_checkpoint["""conv_in.bias"""] = checkpoint["""input_blocks.0.0.bias"""]
    down_block_types = unet_config["""down_block_types"""]
    layers_per_block = unet_config["""layers_per_block"""]
    attention_head_dim = unet_config["""attention_head_dim"""]
    channels_list = unet_config["""block_out_channels"""]
    current_layer = 1
    prev_channels = channels_list[0]
    for i, layer_type in enumerate(down_block_types ):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block ):
                new_prefix = f'''down_blocks.{i}.resnets.{j}'''
                old_prefix = f'''input_blocks.{current_layer}.0'''
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=has_skip )
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block ):
                new_prefix = f'''down_blocks.{i}.resnets.{j}'''
                old_prefix = f'''input_blocks.{current_layer}.0'''
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=has_skip )
                new_prefix = f'''down_blocks.{i}.attentions.{j}'''
                old_prefix = f'''input_blocks.{current_layer}.1'''
                new_checkpoint = convert_attention(
                    checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim )
                current_layer += 1
        if i != len(down_block_types ) - 1:
            new_prefix = f'''down_blocks.{i}.downsamplers.0'''
            old_prefix = f'''input_blocks.{current_layer}.0'''
            new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
            current_layer += 1
        prev_channels = current_channels
    # hardcoded the mid-block for now
    new_prefix = """mid_block.resnets.0"""
    old_prefix = """middle_block.0"""
    new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
    new_prefix = """mid_block.attentions.0"""
    old_prefix = """middle_block.1"""
    new_checkpoint = convert_attention(checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim )
    new_prefix = """mid_block.resnets.1"""
    old_prefix = """middle_block.2"""
    new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
    current_layer = 0
    up_block_types = unet_config["""up_block_types"""]
    for i, layer_type in enumerate(up_block_types ):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1 ):
                new_prefix = f'''up_blocks.{i}.resnets.{j}'''
                old_prefix = f'''output_blocks.{current_layer}.0'''
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=True )
                current_layer += 1
            if i != len(up_block_types ) - 1:
                new_prefix = f'''up_blocks.{i}.upsamplers.0'''
                old_prefix = f'''output_blocks.{current_layer-1}.1'''
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1 ):
                new_prefix = f'''up_blocks.{i}.resnets.{j}'''
                old_prefix = f'''output_blocks.{current_layer}.0'''
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=True )
                new_prefix = f'''up_blocks.{i}.attentions.{j}'''
                old_prefix = f'''output_blocks.{current_layer}.1'''
                new_checkpoint = convert_attention(
                    checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim )
                current_layer += 1
            if i != len(up_block_types ) - 1:
                new_prefix = f'''up_blocks.{i}.upsamplers.0'''
                old_prefix = f'''output_blocks.{current_layer-1}.2'''
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
    new_checkpoint["""conv_norm_out.weight"""] = checkpoint["""out.0.weight"""]
    new_checkpoint["""conv_norm_out.bias"""] = checkpoint["""out.0.bias"""]
    new_checkpoint["""conv_out.weight"""] = checkpoint["""out.2.weight"""]
    new_checkpoint["""conv_out.bias"""] = checkpoint["""out.2.bias"""]
    return new_checkpoint
if __name__ == "__main__":
    parser =argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
    args =parser.parse_args()
    args.class_cond =strabool(args.class_cond)
    ckpt_name =os.path.basename(args.unet_path)
print(F'Checkpoint: {ckpt_name}')
# Get U-Net config
if "imagenet64" in ckpt_name:
        unet_config =IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config =LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
        unet_config =TEST_UNET_CONFIG
else:
raise ValueError(F'Checkpoint type {ckpt_name} is not currently supported.')
if not args.class_cond:
        unet_config["""num_class_embeds"""] =None
    converted_unet_ckpt =con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet =UNet2DModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config =CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config =CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config =CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'Checkpoint type {ckpt_name} is not currently supported.')
    cm_scheduler =CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model =ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
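# Example invocation (script name and paths are illustrative only):
#   python convert_consistency_to_diffusers.py \
#       --unet_path /path/to/cd_imagenet64_l2.pt \
#       --dump_path ./converted-consistency-model \
#       --class_cond True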
| 129 | 1 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None , metadata={"""help""": """Model type selected in the list: """ + """, """.join(MODEL_TYPES )} )
    data_dir: str = field(
        default=None , metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""} )
    max_seq_length: int = field(
        default=1_2_8 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    doc_stride: int = field(
        default=1_2_8 , metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""} , )
    max_query_length: int = field(
        default=6_4 , metadata={
            """help""": (
                """The maximum number of tokens for the question. Questions longer than this will """
                """be truncated to this length."""
            )
        } , )
    max_answer_length: int = field(
        default=3_0 , metadata={
            """help""": (
                """The maximum length of an answer that can be generated. This is needed because the start """
                """and end predictions are not conditioned on one another."""
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
    version_2_with_negative: bool = field(
        default=False , metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""} )
    null_score_diff_threshold: float = field(
        default=0.0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
    n_best_size: int = field(
        default=2_0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
    lang_id: int = field(
        default=0 , metadata={
            """help""": (
                """language id of input for language-specific xlm models (see"""
                """ tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"""
            )
        } , )
    threads: int = field(default=1 , metadata={"""help""": """multiple threads for converting example to features"""} )
class Split(Enum ):
    train = """train"""
    dev = """dev"""
class SquadDataset(Dataset ):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__( self , args , tokenizer , limit_length = None , mode = Split.train , is_language_sensitive = False , cache_dir = None , dataset_format = "pt" , ):
        '''simple docstring'''
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode , str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError('''mode is not a valid split name''' )
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = '''v2''' if args.version_2_with_negative else '''v1'''
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , F"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}""" , )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '''.lock'''
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file )
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features['''features''']
                self.dataset = self.old_features.get('''dataset''' , None )
                self.examples = self.old_features.get('''examples''' , None )
                logger.info(
                    F"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        F"""Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"""
                        ''' future run''' )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir )
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir )
                self.features , self.dataset = squad_convert_examples_to_features(
                    examples=self.examples , tokenizer=tokenizer , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=dataset_format , )
                start = time.time()
                torch.save(
                    {'''features''': self.features, '''dataset''': self.dataset, '''examples''': self.examples} , cached_features_file , )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self ):
'''simple docstring'''
return len(self.features )
    def __getitem__( self , i ):
        '''simple docstring'''
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids , dtype=torch.long )
        attention_mask = torch.tensor(feature.attention_mask , dtype=torch.long )
        token_type_ids = torch.tensor(feature.token_type_ids , dtype=torch.long )
        cls_index = torch.tensor(feature.cls_index , dtype=torch.long )
        p_mask = torch.tensor(feature.p_mask , dtype=torch.float )
        is_impossible = torch.tensor(feature.is_impossible , dtype=torch.float )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': attention_mask,
            '''token_type_ids''': token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({'''cls_index''': cls_index, '''p_mask''': p_mask} )
        if self.args.version_2_with_negative:
            inputs.update({'''is_impossible''': is_impossible} )
        if self.is_language_sensitive:
            inputs.update({'''langs''': (torch.ones(input_ids.shape , dtype=torch.int64 ) * self.args.lang_id)} )
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position , dtype=torch.long )
            end_positions = torch.tensor(feature.end_position , dtype=torch.long )
            inputs.update({'''start_positions''': start_positions, '''end_positions''': end_positions} )
        return inputs
| 330 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure ={}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb'] =['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb_fast'] =['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
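# Illustrative note: with the lazy structure above, accessing NllbTokenizer on the
# module only triggers the heavy sentencepiece/tokenizers imports at attribute
# access time, keeping the initial package import cheap when backends are missing.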
| 129 | 0 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
'''simple docstring'''
def __init__( self : Optional[Any] , _a : Any , _a : Tuple=13 , _a : List[Any]=30 , _a : Optional[int]=2 , _a : Optional[int]=3 , _a : Any=True , _a : str=True , _a : Optional[Any]=32 , _a : Tuple=2 , _a : Any=4 , _a : Tuple=37 , _a : str="gelu" , _a : Any=0.1 , _a : str=0.1 , _a : Union[str, Any]=10 , _a : Optional[Any]=0.02 , _a : Tuple=3 , _a : List[str]=None , _a : Any=2 , ):
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = image_size
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = is_training
UpperCamelCase__ = use_labels
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = scope
UpperCamelCase__ = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
UpperCamelCase__ = (image_size // patch_size) ** 2
UpperCamelCase__ = num_patches + 2
def A_ ( self : int ):
UpperCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = self.get_config()
return config, pixel_values, labels
def A_ ( self : Optional[Any] ):
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def A_ ( self : Optional[int] , _a : List[str] , _a : Dict , _a : Optional[Any] ):
UpperCamelCase__ = TFDeiTModel(config=__lowerCAmelCase )
UpperCamelCase__ = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self : Union[str, Any] , _a : Any , _a : Dict , _a : str ):
UpperCamelCase__ = TFDeiTForMaskedImageModeling(config=__lowerCAmelCase )
UpperCamelCase__ = model(__lowerCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCamelCase__ = 1
UpperCamelCase__ = TFDeiTForMaskedImageModeling(__lowerCAmelCase )
UpperCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase__ = model(__lowerCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def A_ ( self : Union[str, Any] , _a : List[str] , _a : List[str] , _a : str ):
UpperCamelCase__ = self.type_sequence_label_size
UpperCamelCase__ = TFDeiTForImageClassification(__lowerCAmelCase )
UpperCamelCase__ = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase__ = 1
UpperCamelCase__ = TFDeiTForImageClassification(__lowerCAmelCase )
UpperCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase__ = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A_ ( self : List[Any] ):
UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class TFDeiTModelTest( TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFDeiTModel,
'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
def A_ ( self : Dict ):
        self.model_tester = TFDeiTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DeiTConfig , has_text_modality=False , hidden_size=37 )
def A_ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''' )
def A_ ( self : Any ):
pass
def A_ ( self : Optional[Any] ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCamelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase , tf.keras.layers.Dense ) )
def A_ ( self : Union[str, Any] ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(__lowerCAmelCase )
UpperCamelCase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ = [*signature.parameters.keys()]
UpperCamelCase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def A_ ( self : Optional[Any] ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def A_ ( self : List[Any] ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCAmelCase )
def A_ ( self : Dict ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
def A_ ( self : str , _a : List[Any] , _a : Any , _a : List[str]=False ):
UpperCamelCase__ = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def A_ ( self : List[Any] ):
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ = TFDeiTModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def lowerCamelCase_ ( ):
'''simple docstring'''
UpperCamelCase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class TFDeiTModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A_ ( self : Dict ):
return (
DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
if is_vision_available()
else None
)
@slow
def A_ ( self : List[Any] ):
UpperCamelCase__ = TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=__lowerCAmelCase , return_tensors='''tf''' )
# forward pass
UpperCamelCase__ = model(**__lowerCAmelCase )
# verify the logits
UpperCamelCase__ = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
UpperCamelCase__ = tf.constant([-1.0266, 0.1912, -1.2861] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) )
| 358 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["""model.decoder.embed_positions.weights"""]
def rename_keys( name ):
    '''simple docstring'''
    if "emb" in name:
        name = name.replace('''emb''', '''model.decoder.embed_tokens''' )
    if "transformer" in name:
        name = name.replace('''transformer''', '''model.decoder''' )
    if "cross_attention" in name:
        name = name.replace('''cross_attention''', '''encoder_attn''' )
    if "linear1" in name:
        name = name.replace('''linear1''', '''fc1''' )
    if "linear2" in name:
        name = name.replace('''linear2''', '''fc2''' )
    if "norm1" in name:
        name = name.replace('''norm1''', '''self_attn_layer_norm''' )
    if "norm_cross" in name:
        name = name.replace('''norm_cross''', '''encoder_attn_layer_norm''' )
    if "norm2" in name:
        name = name.replace('''norm2''', '''final_layer_norm''' )
    if "out_norm" in name:
        name = name.replace('''out_norm''', '''model.decoder.layer_norm''' )
    if "linears" in name:
        name = name.replace('''linears''', '''lm_heads''' )
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace('''condition_provider.conditioners.description.output_proj''', '''enc_to_dec_proj''' )
    return name
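# Worked example of the renaming above (illustrative key):
#   rename_keys("transformer.layers.0.linear1.weight")
#   -> "model.decoder.layers.0.fc1.weight"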
def rename_state_dict( state_dict , hidden_size ):
    '''simple docstring'''
    keys = list(state_dict.keys() )
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key )
        key = rename_keys(key )
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace('''in_proj_weight''', '''q_proj.weight''' )] = val[:hidden_size, :]
            state_dict[key.replace('''in_proj_weight''', '''k_proj.weight''' )] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace('''in_proj_weight''', '''v_proj.weight''' )] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len('''enc_to_dec_proj.''' ) :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint( checkpoint ):
    '''simple docstring'''
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size, ffn_dim=hidden_size * 4, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint( checkpoint , pytorch_dump_folder=None , repo_id=None , device="cpu" ):
    '''simple docstring'''
    fairseq_model = MusicGen.get_pretrained(checkpoint , device=device )
    decoder_config = decoder_config_from_checkpoint(checkpoint )
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict , enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict , hidden_size=decoder_config.hidden_size )
    text_encoder = T5EncoderModel.from_pretrained('''t5-base''' )
    audio_encoder = EncodecModel.from_pretrained('''facebook/encodec_32khz''' )
    decoder = MusicgenForCausalLM(decoder_config ).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys , unexpected_keys = decoder.load_state_dict(decoder_state_dict , strict=False )
    for key in missing_keys.copy():
        if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key )
    if len(missing_keys ) > 0:
        raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" )
    if len(unexpected_keys ) > 0:
        raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder , audio_encoder=audio_encoder , decoder=decoder )
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict )
    # check we can do a forward pass
    input_ids = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
    decoder_input_ids = input_ids.reshape(2 * 4 , -1 )
    with torch.no_grad():
        logits = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids ).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError('''Incorrect shape for logits''' )
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained('''t5-base''' )
    feature_extractor = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''', padding_side='''left''' )
    processor = MusicgenProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate )
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder ).mkdir(exist_ok=True )
        logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
        model.save_pretrained(pytorch_dump_folder )
        processor.save_pretrained(pytorch_dump_folder )
    if repo_id:
        logger.info(F"""Pushing model {checkpoint} to {repo_id}""" )
        model.push_to_hub(repo_id )
        processor.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
| 35 | 0 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"openbmb/cpm-ant-10b": 10_24,
}
def load_vocab( vocab_file ):
    """simple docstring"""
    vocab = collections.OrderedDict()
    with open(vocab_file , 'r' , encoding='utf-8' ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip('\n' )
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE=200 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = vocab
SCREAMING_SNAKE_CASE_ : List[str] = unk_token
SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_input_chars_per_word
    def tokenize( self , token ):
        """simple docstring"""
        chars = list(token )
        if len(chars ) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars ):
            end = len(chars )
            cur_substr = None
            while start < end:
                substr = ''.join(chars[start:end] )
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token )
                start += 1
            else:
                sub_tokens.append(cur_substr )
                start = end
        return sub_tokens
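# Greedy longest-match example (hypothetical vocab): with vocab = {"a": 0, "ab": 1},
# tokenize("aab") first matches the longest in-vocab prefix "a" (since "aab" and
# "aa" are missing), then "ab", producing ["a", "ab"].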
class CpmAntTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    add_prefix_space = False
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="<d>" , _SCREAMING_SNAKE_CASE="</d>" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="</n>" , _SCREAMING_SNAKE_CASE="</_>" , _SCREAMING_SNAKE_CASE="left" , **_SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
requires_backends(self , ['jieba'] )
super().__init__(
bod_token=_a , eod_token=_a , bos_token=_a , eos_token=_a , pad_token=_a , unk_token=_a , line_token=_a , space_token=_a , padding_side=_a , **_a , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = bod_token
SCREAMING_SNAKE_CASE_ : Optional[Any] = eod_token
SCREAMING_SNAKE_CASE_ : List[Any] = load_vocab(_a )
SCREAMING_SNAKE_CASE_ : List[Any] = self.encoder[space_token]
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
SCREAMING_SNAKE_CASE_ : Any = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _SCREAMING_SNAKE_CASE : x[1] ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE_ : str = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def UpperCAmelCase ( self ):
"""simple docstring"""
return self.encoder[self.bod_token]
@property
def UpperCAmelCase ( self ):
"""simple docstring"""
return self.encoder[self.eod_token]
@property
def UpperCAmelCase ( self ):
"""simple docstring"""
return self.encoder["\n"]
@property
def UpperCAmelCase ( self ):
"""simple docstring"""
return len(self.encoder )
def UpperCAmelCase ( self ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = []
for x in jieba.cut(_a , cut_all=_a ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(_a ) )
return output_tokens
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = [i for i in token_ids if i >= 0]
SCREAMING_SNAKE_CASE_ : List[str] = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(_a , **_a )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return token in self.encoder
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return "".join(_a )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.encoder.get(_a , self.encoder.get(self.unk_token ) )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.decoder.get(_a , self.unk_token )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if os.path.isdir(_a ):
SCREAMING_SNAKE_CASE_ : Optional[int] = os.path.join(
_a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
SCREAMING_SNAKE_CASE_ : Tuple = (filename_prefix + '-' if filename_prefix else '') + save_directory
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
if " " in self.encoder:
SCREAMING_SNAKE_CASE_ : Tuple = self.encoder[' ']
del self.encoder[" "]
if "\n" in self.encoder:
SCREAMING_SNAKE_CASE_ : str = self.encoder['\n']
del self.encoder["\n"]
SCREAMING_SNAKE_CASE_ : Tuple = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _SCREAMING_SNAKE_CASE : x[1] ) )
with open(_a , 'w' , encoding='utf-8' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
' Please check that the vocabulary is not corrupted!' )
SCREAMING_SNAKE_CASE_ : List[str] = token_index
writer.write(token + '\n' )
index += 1
return (vocab_file,)
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is not None:
return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a ))
return [1] + ([0] * len(_a ))
| 253 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class ConvNextV2Config( BackboneConfigMixin , PretrainedConfig ):
    model_type = 'convnextv2'
    def __init__( self , num_channels=3 , patch_size=4 , num_stages=4 , hidden_sizes=None , depths=None , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1e-12 , drop_path_rate=0.0 , image_size=224 , out_features=None , out_indices=None , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ['stem'] + [f"stage{idx}" for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
| 47 | 0 |
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
def gen_gaussian_kernel(k_size , sigma ):
    """simple docstring"""
    center = k_size // 2
    x , y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x ) + square(y )) / (2 * square(sigma )) )
    return g
def gaussian_filter(image , k_size , sigma ):
    """simple docstring"""
    height , width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1
    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size) )
    row = 0
    for i, j in product(range(dst_height ) , range(dst_width ) ):
        window = ravel(image[i : i + k_size, j : j + k_size] )
        image_array[row, :] = window
        row += 1
    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size , sigma )
    filter_array = ravel(gaussian_kernel )
    # reshape and get the dst image
    dst = dot(image_array , filter_array ).reshape(dst_height , dst_width ).astype(uint8 )
    return dst
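# Quick sanity sketch (illustrative values): a 3x3 kernel from
# gen_gaussian_kernel(3, 1.0) is symmetric and peaks at its center:
#   k = gen_gaussian_kernel(3, 1.0)
#   assert k.shape == (3, 3) and k[1, 1] == k.max()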
if __name__ == "__main__":
# read original image
    img = imread(r'''../image_data/lena.jpg''')
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)
    # show result images
    imshow('''gaussian filter with 3x3 mask''', gaussian3x3)
    imshow('''gaussian filter with 5x5 mask''', gaussian5x5)
waitKey()
| 370 |
def solution(length: int = 50 ) -> int:
    """simple docstring"""
    ways_number = [1] * (length + 1)
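    # Project Euler 114-style DP: for every block_length >= 3 and start position,
    # add the fill counts of the remaining suffix (one empty separator cell sits
    # after the block); the += 1 inside the block_length loop counts the single
    # placement where that block runs flush to the end of the row.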
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 44 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool( PipelineTool ):
    description = (
        """This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."""
        """It takes two arguments named `image` which should be the original image, and `label` which should be a text """
        """describing the elements what should be identified in the segmentation mask. The tool returns the mask."""
    )
    default_checkpoint = """CIDAS/clipseg-rd64-refined"""
    name = """image_segmenter"""
    model_class = CLIPSegForImageSegmentation
    inputs = ["""image""", """text"""]
    outputs = ["""image"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['vision'] )
        super().__init__(*args , **kwargs )
    def encode( self , image: "Image" , label: str ):
        return self.pre_processor(text=[label] , images=[image] , padding=True , return_tensors='pt' )
    def forward( self , inputs ):
        with torch.no_grad():
            logits = self.model(**inputs ).logits
        return logits
    def decode( self , outputs ):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8 ) )
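# Hedged usage sketch (illustrative; the checkpoint downloads on first use, and
# the exact call signature follows the tool's encode() arguments):
#   tool = ImageSegmentationTool()
#   mask = tool(image=Image.open("photo.png"), label="cat")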
| 50 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig( XLMRobertaConfig ):
    model_type = """M-CLIP"""
    def __init__( self , transformerDimSize=1024 , imageDimSize=768 , **kwargs ):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs )
class MultilingualCLIP( PreTrainedModel ):
    config_class = MCLIPConfig
    def __init__( self , config , *args , **kwargs ):
        super().__init__(config , *args , **kwargs )
        self.transformer = XLMRobertaModel(config )
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions , out_features=config.numDims )
    def forward( self , txt , txt_tok ):
        embs = self.transformer(input_ids=txt , attention_mask=txt_tok )[0]
        embs2 = (embs * txt_tok.unsqueeze(2 )).sum(dim=1 ) / txt_tok.sum(dim=1 )[:, None]
        return self.LinearTransformation(embs2 ), embs
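# Shape sketch of the masked mean pooling in forward (illustrative sizes):
#   embs: (batch, seq, dim), txt_tok: (batch, seq) -> pooled embs2: (batch, dim);
#   padded positions are zeroed out before averaging over the sequence length.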
| 50 | 1 |
'''simple docstring'''
import sys
import turtle
def get_mid(pa: tuple[float, float] , pb: tuple[float, float] ) -> tuple[float, float]:
    """simple docstring"""
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def triangle(vertex1: tuple[float, float] , vertex2: tuple[float, float] , vertex3: tuple[float, float] , depth: int , ) -> None:
    """simple docstring"""
    my_pen.up()
    my_pen.goto(vertex1[0] , vertex1[1] )
    my_pen.down()
    my_pen.goto(vertex2[0] , vertex2[1] )
    my_pen.goto(vertex3[0] , vertex3[1] )
    my_pen.goto(vertex1[0] , vertex1[1] )
    if depth == 0:
        return
    triangle(vertex1 , get_mid(vertex1 , vertex2 ) , get_mid(vertex1 , vertex3 ) , depth - 1 )
    triangle(vertex2 , get_mid(vertex1 , vertex2 ) , get_mid(vertex2 , vertex3 ) , depth - 1 )
    triangle(vertex3 , get_mid(vertex3 , vertex2 ) , get_mid(vertex3 , vertex1 ) , depth - 1 )
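# Note: every call spawns three recursive calls, so a fractal of depth d draws
# 3**d smallest triangles; drawing time grows exponentially with the CLI depth.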
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"Correct format for using this script: "
"python fractals.py <int:depth_for_fractal>"
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("red")
    vertices = [(-1_7_5, -1_2_5), (0, 1_7_5), (1_7_5, -1_2_5)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 246 |
'''simple docstring'''
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester( unittest.TestCase ):
'''simple docstring'''
def __init__( self, A, A=2, A=56, A=True, A=True, A=True, A=True, A=99, A=32, A=2, A=2, A=7, A="gelu_new", A=0.1, A=0.1, A=512, A=16, A=2, A=0.02, A=4, A="block_sparse", A=True, A=False, A=2, A=3, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : List[str] = seq_length
SCREAMING_SNAKE_CASE : Optional[Any] = is_training
SCREAMING_SNAKE_CASE : Dict = use_attention_mask
SCREAMING_SNAKE_CASE : List[Any] = use_token_type_ids
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE : int = hidden_size
SCREAMING_SNAKE_CASE : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
SCREAMING_SNAKE_CASE : Dict = intermediate_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Any = max_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size
SCREAMING_SNAKE_CASE : Any = type_sequence_label_size
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Optional[Any] = num_choices
SCREAMING_SNAKE_CASE : int = rescale_embeddings
SCREAMING_SNAKE_CASE : Any = attention_type
SCREAMING_SNAKE_CASE : str = use_bias
SCREAMING_SNAKE_CASE : Tuple = block_size
SCREAMING_SNAKE_CASE : List[Any] = num_random_blocks
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
SCREAMING_SNAKE_CASE : List[str] = BigBirdConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=A, initializer_range=self.initializer_range, attention_type=self.attention_type, block_size=self.block_size, num_random_blocks=self.num_random_blocks, use_bias=self.use_bias, rescale_embeddings=self.rescale_embeddings, )
return config, input_ids, token_type_ids, attention_mask
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE : List[str] = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'attention_mask': attention_mask,
}
return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest( FlaxModelTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
    test_attn_probs = False
    test_mismatched_shapes = False
def UpperCamelCase_ ( self ):
'''simple docstring'''
        self.model_tester = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_hidden_states_output()
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = model_class_name.from_pretrained('google/bigbird-roberta-base' )
self.assertIsNotNone(A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE : Any = self._prepare_for_class(A, A )
SCREAMING_SNAKE_CASE : List[str] = model_class(A )
@jax.jit
def model_jitted(A, A=None, **A ):
return model(input_ids=A, attention_mask=A, **A )
with self.subTest('JIT Enabled' ):
SCREAMING_SNAKE_CASE : List[str] = model_jitted(**A ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE : Optional[Any] = model_jitted(**A ).to_tuple()
self.assertEqual(len(A ), len(A ) )
for jitted_output, output in zip(A, A ):
self.assertEqual(jitted_output.shape, output.shape )
def UpperCamelCase_ ( self, A, A, A, A=1E-5, A="outputs", A=None ):
'''simple docstring'''
if name.startswith('outputs.attentions' ):
return
else:
super().check_pt_flax_outputs(A, A, A, A, A, A )
| 246 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def minimax(depth: int , node_index: int , is_max: bool , scores: list[int] , height: float ) -> int:
    if depth < 0:
        raise ValueError('''Depth cannot be less than 0''' )
    if not scores:
        raise ValueError('''Scores cannot be empty''' )
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
        if is_max
        else min(
            minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
    )
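# Note: main() below passes height = math.log(len(scores), 2), which is a float
# (3.0 for 8 leaf scores); the depth == height base case above still fires
# because Python compares int and float by value.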
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
    height = math.log(len(scores ) , 2 )
    print(f"""Optimal value : {minimax(0 , 0 , True , scores , height )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 16 |
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator , batch_size: int = 16 ):
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    datasets = load_dataset('''glue''' , '''mrpc''' )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding='''longest''' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='''pt''' , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(predictions=predictions, references=references)

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 16 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
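

# A small illustration (hypothetical helper mirroring the rename logic in
# `recursively_load_weights` below): the layer index parsed from the fairseq
# key fills the "*" wildcard in MAPPING, e.g.
# "encoder.layers.3.self_attn.k_proj.weight" -> "encoder.layers.3.attention.k_proj".
def _demo_map_key(name):
    for key, mapped_key in MAPPING.items():
        if key in name:
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            return mapped_key
    return name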
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.wav_encoder.wav_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 367 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
UpperCamelCase_ ="""examples/"""
UpperCamelCase_ ={
"""examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""),
"""doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
UpperCamelCase_ ={
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
UpperCamelCase_ ="""README.md"""
def update_version_in_file(fname, version, pattern):
    """Update the version of one file, using one of the REPLACE_PATTERNS."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace links to main docs with links to the stable docs in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Read the current version in the main __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
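

# A worked example (assumed input, hypothetical helper) of the bump arithmetic
# used below: parsing "4.27.1" and bumping the minor version yields "4.28.0",
# while a dev release like "4.28.0.dev0" reduces to its base version "4.28.0".
def _demo_version_bump():
    v = packaging.version.parse("4.27.1")
    return f"{v.major}.{v.minor + 1}.0"  # -> "4.28.0"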
def pre_release_work(patch=False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
| 128 | 0 |
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """
    Recursive 0/1 knapsack: at each index, take the better of skipping the item
    or, if it fits, including it and recursing on the remaining capacity.
    """
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
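

# Worked example (hypothetical helper, not part of the original module): with
# weights [1, 3, 4, 5], values [1, 4, 5, 7] and capacity 7, the optimum takes
# the items of weight 3 and 4 for a total value of 9.
def _demo_knapsack() -> int:
    return knapsack([1, 3, 4, 5], [1, 4, 5, 7], 4, 7, 0)  # -> 9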
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    """Wraps a Speech2Text feature extractor and a Speech2Text tokenizer into a single processor."""

    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility with `as_target_processor`
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
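

# A minimal usage sketch (the checkpoint name and dummy audio are illustrative
# assumptions, not part of this module; requires network access if called):
# audio goes through the feature extractor while `text=` is tokenized and
# returned under "labels".
def _usage_sketch():
    import numpy as np

    processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
    speech = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
    batch = processor(audio=speech, sampling_rate=16_000, text="hello world")
    return batch  # contains "input_features" and "labels", among other keys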
| 72 | 0 |
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Map each element of `vector` into (0, 1) with the logistic function 1 / (1 + e^-x)."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Sigmoid Linear Unit (SiLU, also known as swish): x * sigmoid(x)."""
    return vector * sigmoid(vector)
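

# Worked check (hypothetical helper, not run automatically): sigmoid(0) = 0.5,
# so the SiLU of 0 is 0, and the logistic function stays strictly in (0, 1).
def _demo() -> None:
    x = np.array([-1.0, 0.0, 1.0])
    assert np.isclose(sigmoid(np.array([0.0])), 0.5).all()
    assert np.isclose(sigmoid_linear_unit(np.array([0.0])), 0.0).all()
    assert np.all((sigmoid(x) > 0) & (sigmoid(x) < 1))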
if __name__ == "__main__":
import doctest
doctest.testmod()
| 218 |
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = "base_with_context"
def load_notes_encoder(weights, model):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding'''] ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False)
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
SCREAMING_SNAKE_CASE__ = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
        attention_weights = ly_weight["attention"]
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def load_continuous_encoder(weights, model):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False)
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def load_decoder(weights, model):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False)
SCREAMING_SNAKE_CASE__ = nn.Parameter(
torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
SCREAMING_SNAKE_CASE__ = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) )
        attention_weights = ly_weight["self_attention"]
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) )
return model
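

# The `.T` calls above come from a layout difference: a Flax `Dense` kernel is
# stored as (in_features, out_features), while `torch.nn.Linear.weight` is
# (out_features, in_features). A minimal sketch with toy shapes (hypothetical
# helper, not part of the original script):
def _demo_kernel_transpose() -> None:
    kernel = onp.zeros((64, 128))  # Flax kernel: (in, out)
    linear = nn.Linear(64, 128)  # expects weight of shape (out, in)
    linear.weight = nn.Parameter(torch.FloatTensor(kernel.T))
    assert tuple(linear.weight.shape) == (128, 64)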
def main(args):
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    ta_checkpoint = jnp.tree_util.tree_map(onp.array, ta_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu",
    )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu",
    )
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(ta_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(ta_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument(
'''--checkpoint_path''',
default=F"{MODEL}/checkpoint_500000",
type=str,
required=False,
help='''Path to the original jax model checkpoint.''',
)
    args = parser.parse_args()
main(args)
| 218 | 1 |
"""simple docstring"""
import string
def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing the decryption under every key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
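

# Worked example (hypothetical helper, not run automatically): "HELLO" shifted
# by key 3 becomes "KHOOR", so brute-forcing "KHOOR" prints "HELLO" at key #3.
def _demo() -> None:
    encrypted = "".join(
        string.ascii_uppercase[(string.ascii_uppercase.find(symbol) + 3) % 26] for symbol in "HELLO"
    )
    assert encrypted == "KHOOR"
    decrypt(encrypted)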
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 46 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=False , lowercase=False , lowercase=False , lowercase=2 , lowercase=99 , lowercase=0 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=12 , lowercase=2 , lowercase=0.02 , lowercase=3 , lowercase=4 , lowercase="last" , lowercase=None , lowercase=None , ) -> int:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_lengths
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = gelu_activation
lowerCAmelCase = sinusoidal_embeddings
lowerCAmelCase = causal
lowerCAmelCase = asm
lowerCAmelCase = n_langs
lowerCAmelCase = vocab_size
lowerCAmelCase = n_special
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = summary_type
lowerCAmelCase = use_proj
lowerCAmelCase = scope
    def prepare_config_and_inputs(self):
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
if self.use_input_lengths:
lowerCAmelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , 2 ).float()
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def get_config(self):
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Any:
lowerCAmelCase = FlaubertModel(config=lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase , lengths=lowercase , langs=lowercase )
lowerCAmelCase = model(lowercase , langs=lowercase )
lowerCAmelCase = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Tuple:
lowerCAmelCase = FlaubertWithLMHeadModel(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> str:
lowerCAmelCase = FlaubertForQuestionAnsweringSimple(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase )
lowerCAmelCase = model(lowercase , start_positions=lowercase , end_positions=lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Dict:
lowerCAmelCase = FlaubertForQuestionAnswering(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase )
lowerCAmelCase = model(
lowercase , start_positions=lowercase , end_positions=lowercase , cls_index=lowercase , is_impossible=lowercase , p_mask=lowercase , )
lowerCAmelCase = model(
lowercase , start_positions=lowercase , end_positions=lowercase , cls_index=lowercase , is_impossible=lowercase , )
((lowerCAmelCase) , ) = result_with_labels.to_tuple()
lowerCAmelCase = model(lowercase , start_positions=lowercase , end_positions=lowercase )
((lowerCAmelCase) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> int:
lowerCAmelCase = FlaubertForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase )
lowerCAmelCase = model(lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> int:
lowerCAmelCase = self.num_labels
lowerCAmelCase = FlaubertForTokenClassification(lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Tuple:
lowerCAmelCase = self.num_choices
lowerCAmelCase = FlaubertForMultipleChoice(config=lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase = model(
lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
_SCREAMING_SNAKE_CASE = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE = (
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _snake_case ( self , lowercase , lowercase , lowercase=False ) -> Optional[Any]:
lowerCAmelCase = super()._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase )
lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase )
return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
def _snake_case ( self ) -> Tuple:
self.config_tester.run_common_tests()
def _snake_case ( self ) -> Tuple:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowercase )
def _snake_case ( self ) -> Tuple:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowercase )
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*lowercase )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowercase )
def _snake_case ( self ) -> Any:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowercase )
def _snake_case ( self ) -> Any:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*lowercase )
def _snake_case ( self ) -> Any:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*lowercase )
@slow
def _snake_case ( self ) -> Tuple:
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = FlaubertModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@slow
@require_torch_gpu
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowerCAmelCase = True
lowerCAmelCase = model_class(config=lowercase )
lowerCAmelCase = self._prepare_for_class(lowercase , lowercase )
lowerCAmelCase = torch.jit.trace(
lowercase , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowercase , os.path.join(lowercase , """traced_model.pt""" ) )
lowerCAmelCase = torch.jit.load(os.path.join(lowercase , """traced_model.pt""" ) , map_location=lowercase )
loaded(inputs_dict["""input_ids"""].to(lowercase ) , inputs_dict["""attention_mask"""].to(lowercase ) )
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
@slow
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" )
lowerCAmelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
with torch.no_grad():
lowerCAmelCase = model(lowercase )[0]
lowerCAmelCase = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , lowercase )
lowerCAmelCase = torch.tensor(
[[[-2.6_251, -1.4_298, -0.0_227], [-2.8_510, -1.6_387, 0.2_258], [-2.8_114, -1.1_832, -0.3_066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase , atol=1e-4 ) )
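

# A self-contained sketch (toy module, not one of the models under test) of the
# trace / save / load round trip exercised in the GPU torchscript test above:
def _demo_jit_roundtrip() -> None:
    class Tiny(torch.nn.Module):
        def forward(self, x):
            return x * 2

    traced = torch.jit.trace(Tiny(), (torch.ones(2, 3),))
    with tempfile.TemporaryDirectory() as tmp:
        path = os.path.join(tmp, "traced_model.pt")
        torch.jit.save(traced, path)
        loaded = torch.jit.load(path)
    assert torch.equal(loaded(torch.ones(2, 3)), torch.full((2, 3), 2.0))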
| 46 | 1 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
# Load configuration defined in the metadata file
with open(a_) as metadata_file:
__a : List[Any] = json.load(a_)
__a : Optional[Any] = LukeConfig(use_entity_aware_attention=a_ , **metadata['''model_config'''])
# Load in the weights from the checkpoint_path
__a : List[Any] = torch.load(a_ , map_location='''cpu''')['''module''']
# Load the entity vocab file
__a : Tuple = load_original_entity_vocab(a_)
# add an entry for [MASK2]
__a : str = max(entity_vocab.values()) + 1
config.entity_vocab_size += 1
__a : Optional[int] = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''])
# Add special tokens to the token vocabulary for downstream tasks
__a : Tuple = AddedToken('''<ent>''' , lstrip=a_ , rstrip=a_)
__a : Union[str, Any] = AddedToken('''<ent2>''' , lstrip=a_ , rstrip=a_)
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]})
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""")
tokenizer.save_pretrained(a_)
with open(os.path.join(a_ , '''tokenizer_config.json''') , '''r''') as f:
__a : int = json.load(a_)
__a : List[str] = '''MLukeTokenizer'''
with open(os.path.join(a_ , '''tokenizer_config.json''') , '''w''') as f:
json.dump(a_ , a_)
with open(os.path.join(a_ , MLukeTokenizer.vocab_files_names['''entity_vocab_file''']) , '''w''') as f:
json.dump(a_ , a_)
__a : Any = MLukeTokenizer.from_pretrained(a_)
# Initialize the embeddings of the special tokens
__a : Optional[int] = tokenizer.convert_tokens_to_ids(['''@'''])[0]
__a : str = tokenizer.convert_tokens_to_ids(['''#'''])[0]
__a : List[Any] = state_dict['''embeddings.word_embeddings.weight''']
__a : List[str] = word_emb[ent_init_index].unsqueeze(0)
__a : Union[str, Any] = word_emb[enta_init_index].unsqueeze(0)
__a : str = torch.cat([word_emb, ent_emb, enta_emb])
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
__a : Union[str, Any] = state_dict[bias_name]
__a : List[str] = decoder_bias[ent_init_index].unsqueeze(0)
__a : Optional[int] = decoder_bias[enta_init_index].unsqueeze(0)
__a : Union[str, Any] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias])
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers):
for matrix_name in ["query.weight", "query.bias"]:
__a : List[Any] = F"""encoder.layer.{layer_index}.attention.self."""
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    state_dict = state_dict_for_hugging_face

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path: str) -> dict:
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
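# Illustrative input/output for the loader above (hypothetical data, not from the
# source): each line of the entity vocab file is a JSON object such as
#   {"id": 3, "entities": [["Japan", "en"], ["日本", "ja"]]}
# which the loader maps to {"en:Japan": 3, "ja:日本": 3}; special tokens like
# "[MASK]" keep their bare name as the key.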
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
| 188 |
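# A hypothetical invocation of the converter above (the script name and all
# paths are placeholders, not taken from the source):
#   python convert_mluke_checkpoint.py \
#       --checkpoint_path mluke/pytorch_model.bin \
#       --metadata_path mluke/metadata.json \
#       --entity_vocab_path mluke/entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke-base \
#       --model_size base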
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
def _lowerCamelCase ( self ):
__a : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : Dict = self.get_dummy_inputs()
__a : Any = pipe(**_UpperCAmelCase ).images
__a : Optional[int] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
__a : List[Any] = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def _lowerCamelCase ( self ):
__a : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__a : Tuple = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : Optional[int] = self.get_dummy_inputs()
__a : Optional[Any] = pipe(**_UpperCAmelCase ).images
__a : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__a : Optional[int] = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowerCamelCase ( self ):
__a : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__a : Any = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
# warmup pass to apply optimizations
__a : Any = pipe(**self.get_dummy_inputs() )
__a : List[str] = self.get_dummy_inputs()
__a : Tuple = pipe(**_UpperCAmelCase ).images
__a : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__a : int = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowerCamelCase ( self ):
__a : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__a : Any = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : List[Any] = self.get_dummy_inputs()
__a : Any = pipe(**_UpperCAmelCase ).images
__a : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__a : Optional[Any] = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowerCamelCase ( self ):
__a : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__a : Any = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : Union[str, Any] = self.get_dummy_inputs()
__a : str = pipe(**_UpperCAmelCase ).images
__a : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__a : Optional[int] = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowerCamelCase ( self ):
__a : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__a : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : Optional[int] = self.get_dummy_inputs()
__a : Optional[Any] = pipe(**_UpperCAmelCase ).images
__a : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__a : Optional[Any] = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@property
def _lowerCamelCase ( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _lowerCamelCase ( self ):
__a : Optional[Any] = ort.SessionOptions()
__a : Any = False
return options
def _lowerCamelCase ( self ):
__a : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
__a : Tuple = init_image.resize((768, 512) )
# using the PNDM scheduler by default
__a : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : Tuple = '''A fantasy landscape, trending on artstation'''
__a : Tuple = np.random.RandomState(0 )
__a : int = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=10 , generator=_UpperCAmelCase , output_type='''np''' , )
__a : List[Any] = output.images
__a : int = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__a : Any = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 0.4_6_2_3, 0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _lowerCamelCase ( self ):
__a : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
__a : Tuple = init_image.resize((768, 512) )
__a : str = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
__a : Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : List[str] = '''A fantasy landscape, trending on artstation'''
__a : str = np.random.RandomState(0 )
__a : str = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=20 , generator=_UpperCAmelCase , output_type='''np''' , )
__a : Dict = output.images
__a : List[Any] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__a : Dict = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 188 | 1 |
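# Minimal usage sketch of the img2img ONNX pipeline exercised above (a sketch
# under assumptions: onnxruntime with a CPU provider is installed, and
# `init_image` is a PIL image supplied by the caller):
# import numpy as np
# from diffusers import OnnxStableDiffusionImg2ImgPipeline
# pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
#     "CompVis/stable-diffusion-v1-4", revision="onnx", provider="CPUExecutionProvider"
# )
# out = pipe(prompt="A fantasy landscape", image=init_image, strength=0.75,
#            guidance_scale=7.5, num_inference_steps=10,
#            generator=np.random.RandomState(0), output_type="np")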
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : Any ):
SCREAMING_SNAKE_CASE:str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,"image_mean" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,"image_std" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,"do_normalize" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,"do_resize" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,"do_rescale" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,"do_pad" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,"size" ) )
def __UpperCamelCase ( self : Any ):
SCREAMING_SNAKE_CASE:Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"shortest_edge": 18, "longest_edge": 1_333} )
self.assertEqual(image_processor.do_pad ,SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : Dict ):
pass
def __UpperCamelCase ( self : Dict ):
# Initialize image_processing
SCREAMING_SNAKE_CASE:str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE:str = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE:Optional[int] = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:int = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Union[str, Any] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Optional[Any] = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def __UpperCamelCase ( self : Optional[int] ):
# Initialize image_processing
SCREAMING_SNAKE_CASE:List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE:List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__ ,numpify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE:Optional[int] = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:List[Any] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
SCREAMING_SNAKE_CASE:List[Any] = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:str = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def __UpperCamelCase ( self : int ):
# Initialize image_processing
SCREAMING_SNAKE_CASE:Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE:Tuple = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__ ,torchify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE:Optional[Any] = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Union[str, Any] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
SCREAMING_SNAKE_CASE:Union[str, Any] = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Dict = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__ )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
@slow
def __UpperCamelCase ( self : List[Any] ):
# prepare image and target
SCREAMING_SNAKE_CASE:Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" ,"r" ) as f:
SCREAMING_SNAKE_CASE:List[str] = json.loads(f.read() )
SCREAMING_SNAKE_CASE:List[Any] = {"image_id": 39_769, "annotations": target}
# encode them
SCREAMING_SNAKE_CASE:Tuple = DetaImageProcessor()
SCREAMING_SNAKE_CASE:Optional[int] = image_processing(images=SCREAMING_SNAKE_CASE__ ,annotations=SCREAMING_SNAKE_CASE__ ,return_tensors="pt" )
# verify pixel values
SCREAMING_SNAKE_CASE:Any = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape ,SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:str = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE:Union[str, Any] = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] ,SCREAMING_SNAKE_CASE__ ) )
# verify boxes
SCREAMING_SNAKE_CASE:Dict = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape ,SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:List[Any] = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] ,SCREAMING_SNAKE_CASE__ ,atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE:List[str] = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] ,SCREAMING_SNAKE_CASE__ ) )
# verify is_crowd
SCREAMING_SNAKE_CASE:int = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] ,SCREAMING_SNAKE_CASE__ ) )
# verify class_labels
SCREAMING_SNAKE_CASE:Tuple = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] ,SCREAMING_SNAKE_CASE__ ) )
# verify orig_size
SCREAMING_SNAKE_CASE:List[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] ,SCREAMING_SNAKE_CASE__ ) )
# verify size
SCREAMING_SNAKE_CASE:Any = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] ,SCREAMING_SNAKE_CASE__ ) )
@slow
def __UpperCamelCase ( self : Union[str, Any] ):
# prepare image, target and masks_path
SCREAMING_SNAKE_CASE:str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" ,"r" ) as f:
SCREAMING_SNAKE_CASE:List[str] = json.loads(f.read() )
SCREAMING_SNAKE_CASE:List[str] = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}
SCREAMING_SNAKE_CASE:str = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
SCREAMING_SNAKE_CASE:Optional[Any] = DetaImageProcessor(format="coco_panoptic" )
SCREAMING_SNAKE_CASE:Optional[int] = image_processing(images=SCREAMING_SNAKE_CASE__ ,annotations=SCREAMING_SNAKE_CASE__ ,masks_path=SCREAMING_SNAKE_CASE__ ,return_tensors="pt" )
# verify pixel values
SCREAMING_SNAKE_CASE:Tuple = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape ,SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:List[str] = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1e-4 ) )
# verify area
SCREAMING_SNAKE_CASE:str = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] ,SCREAMING_SNAKE_CASE__ ) )
# verify boxes
SCREAMING_SNAKE_CASE:Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape ,SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Dict = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] ,SCREAMING_SNAKE_CASE__ ,atol=1e-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE:int = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] ,SCREAMING_SNAKE_CASE__ ) )
# verify is_crowd
SCREAMING_SNAKE_CASE:Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] ,SCREAMING_SNAKE_CASE__ ) )
# verify class_labels
SCREAMING_SNAKE_CASE:Any = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] ,SCREAMING_SNAKE_CASE__ ) )
# verify masks
SCREAMING_SNAKE_CASE:Any = 822_873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() ,SCREAMING_SNAKE_CASE__ )
# verify orig_size
SCREAMING_SNAKE_CASE:Any = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] ,SCREAMING_SNAKE_CASE__ ) )
# verify size
SCREAMING_SNAKE_CASE:Optional[int] = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] ,SCREAMING_SNAKE_CASE__ ) )
| 139 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 139 | 1 |
'''simple docstring'''
from PIL import Image
def mean_threshold(image: Image.Image) -> Image.Image:
    """Binarize a grayscale PIL image, using the mean pixel value as the threshold."""
    width, height = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            mean += pixels[i, j]
    mean //= width * height

    for i in range(width):
        for j in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
| 355 |
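# An equivalent vectorized sketch using NumPy (an alternative formulation, not
# part of the original script; assumes a grayscale input):
import numpy as np
from PIL import Image

def mean_threshold_np(image: Image.Image) -> Image.Image:
    arr = np.asarray(image.convert("L"), dtype=np.uint8)
    # pixels strictly above the mean become white, the rest black
    binary = np.where(arr > arr.mean(), 255, 0).astype(np.uint8)
    return Image.fromarray(binary)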
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A BERT-style configuration with extra `pruning_method`, `mask_init` and `mask_scale` fields."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
| 219 | 0 |
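# Minimal usage sketch of the config above (standalone; values are illustrative):
# config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
# print(config.hidden_size)   # 768 by default
# print(config.model_type)    # "masked_bert"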
'''simple docstring'''
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}


def next_term(a_i, k, i, n):
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))
    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)

        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    # adds addend to digit array given in digits
    # starting at index k
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(f"{solution() = }")
| 206 |
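# A naive reference for small n (illustrative only; the memoized code above
# computes the same sequence a(n) = a(n-1) + digit_sum(a(n-1)) far faster):
def solution_naive(n: int = 20) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))  # add the digit sum of the previous term
    return a

# solution_naive(k) should agree with solution(k) for small k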
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = None , lowercase = None , **lowercase , ):
super().__init__(
lowercase , split=lowercase , features=lowercase , cache_dir=lowercase , keep_in_memory=lowercase , streaming=lowercase , num_proc=lowercase , **lowercase , )
A_ : Any = field
A_ : Optional[int] = path_or_paths if isinstance(lowercase , lowercase ) else {self.split: path_or_paths}
A_ : str = Json(
cache_dir=lowercase , data_files=lowercase , features=lowercase , field=lowercase , **lowercase , )
def _a (self ):
# Build iterable dataset
if self.streaming:
A_ : Optional[int] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
A_ : Optional[Any] = None
A_ : Optional[Any] = None
A_ : Tuple = None
A_ : Optional[int] = None
self.builder.download_and_prepare(
download_config=lowercase , download_mode=lowercase , verification_mode=lowercase , base_path=lowercase , num_proc=self.num_proc , )
A_ : Union[str, Any] = self.builder.as_dataset(
split=self.split , verification_mode=lowercase , in_memory=self.keep_in_memory )
return dataset
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase , lowercase = None , lowercase = None , **lowercase , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(F'num_proc {num_proc} must be an integer > 0.' )
A_ : Union[str, Any] = dataset
A_ : Optional[int] = path_or_buf
A_ : str = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
A_ : List[str] = num_proc
A_ : Union[str, Any] = """utf-8"""
A_ : Dict = to_json_kwargs
def _a (self ):
A_ : Optional[Any] = self.to_json_kwargs.pop("""path_or_buf""" , lowercase )
A_ : Tuple = self.to_json_kwargs.pop("""orient""" , """records""" )
A_ : Union[str, Any] = self.to_json_kwargs.pop("""lines""" , True if orient == """records""" else False )
A_ : str = self.to_json_kwargs.pop("""index""" , False if orient in ["""split""", """table"""] else True )
A_ : Dict = self.to_json_kwargs.pop("""compression""" , lowercase )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F'`datasets` currently does not support {compression} compression' )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , """wb""" , compression=lowercase ) as buffer:
A_ : List[str] = self._write(file_obj=lowercase , orient=lowercase , lines=lowercase , index=lowercase , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
F'The compression parameter is not supported when writing to a buffer, but compression={compression}'
""" was passed. Please provide a local path instead.""" )
A_ : List[Any] = self._write(
file_obj=self.path_or_buf , orient=lowercase , lines=lowercase , index=lowercase , **self.to_json_kwargs )
return written
def _a (self , lowercase ):
A_, A_, A_, A_, A_ : Dict = args
A_ : Any = query_table(
table=self.dataset.data , key=slice(lowercase , offset + self.batch_size ) , indices=self.dataset._indices , )
A_ : Union[str, Any] = batch.to_pandas().to_json(
path_or_buf=lowercase , orient=lowercase , lines=lowercase , index=lowercase , **lowercase )
if not json_str.endswith("""\n""" ):
json_str += "\n"
return json_str.encode(self.encoding )
def _a (self , lowercase , lowercase , lowercase , lowercase , **lowercase , ):
A_ : Optional[Any] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
A_ : Any = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(lowercase )
else:
A_, A_ : List[Any] = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , lowercase , lowercase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
written += file_obj.write(lowercase )
        return written
| 206 | 1 |
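# Minimal usage sketch that exercises the reader/writer classes above through
# the public `datasets` API (file names are placeholders, not from the source):
# from datasets import load_dataset
# ds = load_dataset("json", data_files={"train": "train.jsonl"}, split="train")
# ds.to_json("out.jsonl", lines=True, orient="records", num_proc=2)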
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a generated token sequence into an ordered JSON-like structure."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
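    # Illustrative behavior of token2json (hypothetical tag sequence, not from
    # the source):
    #   "<s_menu><s_name>Latte</s_name><sep/><s_name>Mocha</s_name></s_menu>"
    # parses to roughly
    #   {"menu": [{"name": "Latte"}, {"name": "Mocha"}]}
    # nested <s_...> tags become dicts; "<sep/>" separates repeated sibling values.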
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 17 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
'processing_layoutlmv2': ['LayoutLMv2Processor'],
'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv2"] = [
'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv2ForQuestionAnswering',
'LayoutLMv2ForSequenceClassification',
'LayoutLMv2ForTokenClassification',
'LayoutLMv2Layer',
'LayoutLMv2Model',
'LayoutLMv2PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 17 | 1 |
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Read raw characters from stdin (Windows and POSIX)."""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(1_26))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    """Read one key from the keyboard, translating arrow-key escape sequences."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 330 |
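# Minimal interactive usage sketch of the helpers above (run in a terminal):
# if __name__ == "__main__":
#     print("Press a key (arrow keys are reported with ARROW_KEY_FLAG set)...")
#     key = get_character()
#     print(f"read: {key!r}")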
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class __lowerCAmelCase ( unittest.TestCase ):
lowerCamelCase_ : Any = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict:
'''simple docstring'''
snake_case_ : Any = hf_hub_download(
repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
snake_case_ : List[Any] = VideoClassificationPipeline(model=__magic_name__ , image_processor=__magic_name__ , top_k=2 )
snake_case_ : str = [
example_video_filepath,
'''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''',
]
return video_classifier, examples
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> Any:
'''simple docstring'''
for example in examples:
snake_case_ : Union[str, Any] = video_classifier(__magic_name__ )
self.assertEqual(
__magic_name__ , [
{'''score''': ANY(__magic_name__ ), '''label''': ANY(__magic_name__ )},
{'''score''': ANY(__magic_name__ ), '''label''': ANY(__magic_name__ )},
] , )
@require_torch
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Any = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification'''
snake_case_ : str = VideoMAEFeatureExtractor(
size={'''shortest_edge''': 10} , crop_size={'''height''': 10, '''width''': 10} )
snake_case_ : int = pipeline(
'''video-classification''' , model=__magic_name__ , feature_extractor=__magic_name__ , frame_sampling_rate=4 )
snake_case_ : List[str] = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
snake_case_ : Union[str, Any] = video_classifier(__magic_name__ , top_k=2 )
self.assertEqual(
nested_simplify(__magic_name__ , decimals=4 ) , [{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}] , )
snake_case_ : int = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(__magic_name__ , decimals=4 ) , [
[{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}],
[{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}],
] , )
@require_tf
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
pass
| 279 | 0 |
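# Minimal usage sketch of the pipeline under test (mirrors the tiny checkpoint
# used above; the video path is a placeholder and decord must be installed):
# from transformers import pipeline
# classifier = pipeline(
#     "video-classification",
#     model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification",
# )
# print(classifier("archery.mp4", top_k=2))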
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    """Adjacency-list graph that supports both directed and undirected edges."""

    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> "GraphAdjacencyList[T]":
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as it's first adjacent vertex also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as it's first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as it's first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
| 37 |
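# Usage sketch for the adjacency-list graph above:
# g = GraphAdjacencyList[int](directed=False)
# g.add_edge(1, 2).add_edge(2, 3)
# print(g)   # {1: [2], 2: [1, 3], 3: [2]}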
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
logger = logging.get_logger(__name__)
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( snake_case__ ,snake_case__ ,unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = UNetaDModel
SCREAMING_SNAKE_CASE = '''sample'''
@property
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
__UpperCAmelCase : Dict = 4
__UpperCAmelCase : Dict = 3
__UpperCAmelCase : Dict = (32, 32)
__UpperCAmelCase : Any = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase_ )
__UpperCAmelCase : str = torch.tensor([10] ).to(UpperCAmelCase_ )
return {"sample": noise, "timestep": time_step}
@property
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
return (3, 32, 32)
@property
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
return (3, 32, 32)
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = {
"block_out_channels": (32, 64),
"down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
"up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
"attention_head_dim": 3,
"out_channels": 3,
"in_channels": 3,
"layers_per_block": 2,
"sample_size": 32,
}
__UpperCAmelCase : List[str] = self.dummy_input
return init_dict, inputs_dict
class SCREAMING_SNAKE_CASE__ ( snake_case__ ,snake_case__ ,unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = UNetaDModel
SCREAMING_SNAKE_CASE = '''sample'''
@property
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
__UpperCAmelCase : str = 4
__UpperCAmelCase : Dict = 4
__UpperCAmelCase : Optional[int] = (32, 32)
__UpperCAmelCase : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase_ )
__UpperCAmelCase : List[Any] = torch.tensor([10] ).to(UpperCAmelCase_ )
return {"sample": noise, "timestep": time_step}
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
return (4, 32, 32)
@property
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
return (4, 32, 32)
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
__UpperCAmelCase : Dict = {
"sample_size": 32,
"in_channels": 4,
"out_channels": 4,
"layers_per_block": 2,
"block_out_channels": (32, 64),
"attention_head_dim": 32,
"down_block_types": ("DownBlock2D", "DownBlock2D"),
"up_block_types": ("UpBlock2D", "UpBlock2D"),
}
__UpperCAmelCase : List[Any] = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase : str = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(UpperCAmelCase_ )
__UpperCAmelCase : List[str] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU" )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase : List[str] = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
__UpperCAmelCase : Union[str, Any] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU" )
    def test_from_pretrained_accelerate_wont_change_results( self ):
        """simple docstring"""
        # by default model loading will use accelerate as `low_cpu_mem_usage=True`
        model_accelerate , _ = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=True )
        model_accelerate.to(torch_device )
        model_accelerate.eval()
        noise = torch.randn(
            1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
        noise = noise.to(torch_device )
        time_step = torch.tensor([10] * noise.shape[0] ).to(torch_device )
        arr_accelerate = model_accelerate(noise , time_step )["sample"]
        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()
        model_normal_load , _ = UNetaDModel.from_pretrained(
            "fusing/unet-ldm-dummy-update" , output_loading_info=True , low_cpu_mem_usage=False )
        model_normal_load.to(torch_device )
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise , time_step )["sample"]
        assert torch_all_close(arr_accelerate , arr_normal_load , rtol=1e-3 )
    def test_output_pretrained( self ):
        """simple docstring"""
        model = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" )
        model.eval()
        model.to(torch_device )
        noise = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        noise = noise.to(torch_device )
        time_step = torch.tensor([10] * noise.shape[0] ).to(torch_device )
        with torch.no_grad():
            output = model(noise , time_step ).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] )
        # fmt: on
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1e-3 ) )
class NCSNppModelTests( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    """simple docstring"""
    model_class = UNetaDModel
    main_input_name = '''sample'''
@property
    def dummy_input( self , sizes=(32, 32) ):
        """simple docstring"""
        batch_size = 4
        num_channels = 3
        noise = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
        time_step = torch.tensor(batch_size * [10] ).to(dtype=torch.int32 , device=torch_device )
return {"sample": noise, "timestep": time_step}
@property
    def input_shape( self ):
        """simple docstring"""
        return (3, 32, 32)
@property
    def output_shape( self ):
        """simple docstring"""
        return (3, 32, 32)
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        init_dict = {
"block_out_channels": [32, 64, 64, 64],
"in_channels": 3,
"layers_per_block": 1,
"out_channels": 3,
"time_embedding_type": "fourier",
"norm_eps": 1e-6,
"mid_block_scale_factor": math.sqrt(2.0 ),
"norm_num_groups": None,
"down_block_types": [
"SkipDownBlock2D",
"AttnSkipDownBlock2D",
"SkipDownBlock2D",
"SkipDownBlock2D",
],
"up_block_types": [
"SkipUpBlock2D",
"SkipUpBlock2D",
"AttnSkipUpBlock2D",
"SkipUpBlock2D",
],
}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
@slow
    def test_from_pretrained_hub( self ):
        """simple docstring"""
        model , loading_info = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256" , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
        model.to(torch_device )
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256) ).to(torch_device )
        inputs["sample"] = noise
        image = model(**inputs )
        assert image is not None, "Make sure output is not None"
@slow
    def test_output_pretrained_ve_large( self ):
        """simple docstring"""
        model = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256" )
        model.to(torch_device )
        batch_size = 4
        num_channels = 3
        sizes = (256, 256)
        noise = torch.ones((batch_size, num_channels) + sizes ).to(torch_device )
        time_step = torch.tensor(batch_size * [1e-4] ).to(torch_device )
        with torch.no_grad():
            output = model(noise , time_step ).sample
        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608] )
        # fmt: on
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1e-2 ) )
    def test_output_pretrained_ve_mid( self ):
        """simple docstring"""
        model = UNetaDModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update" )
        model.to(torch_device )
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        noise = torch.ones((batch_size, num_channels) + sizes ).to(torch_device )
        time_step = torch.tensor(batch_size * [1e-4] ).to(torch_device )
        with torch.no_grad():
            output = model(noise , time_step ).sample
        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] )
        # fmt: on
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1e-2 ) )
    def test_forward_with_norm_groups( self ):
        """simple docstring"""
        # not required for this model
pass
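# A minimal standalone sketch of the denoising call the tests above exercise.
# It assumes the `diffusers` package is installed and the same dummy checkpoint
# the tests load is reachable on the Hub.
import torch
from diffusers import UNet2DModel

unet = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
unet.eval()
noise = torch.randn(1, unet.config.in_channels, unet.config.sample_size, unet.config.sample_size)
with torch.no_grad():
    sample = unet(noise, torch.tensor([10])).sample  # same call pattern as the tests
print(sample.shape)  # (1, in_channels, sample_size, sample_size)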
| 37 | 1 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class SpeechaTextProcessor( ProcessorMixin ):
    '''simple docstring'''
    feature_extractor_class = """Speech2TextFeatureExtractor"""
    tokenizer_class = """Speech2TextTokenizer"""
    def __init__(self ,feature_extractor ,tokenizer ) -> None:
        """simple docstring"""
        super().__init__(feature_extractor ,tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__(self ,*args ,**kwargs ) -> Dict:
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor(*args ,**kwargs )
        if "raw_speech" in kwargs:
            warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
            audio = kwargs.pop('''raw_speech''' )
        else:
            audio = kwargs.pop('''audio''' ,None )
        sampling_rate = kwargs.pop('''sampling_rate''' ,None )
        text = kwargs.pop('''text''' ,None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        if audio is not None:
            inputs = self.feature_extractor(audio ,*args ,sampling_rate=sampling_rate ,**kwargs )
        if text is not None:
            encodings = self.tokenizer(text ,**kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['''labels'''] = encodings['''input_ids''']
            return inputs
    def batch_decode(self ,*args ,**kwargs ) -> List[str]:
        """simple docstring"""
        return self.tokenizer.batch_decode(*args ,**kwargs )
    def decode(self ,*args ,**kwargs ) -> Tuple:
        """simple docstring"""
        return self.tokenizer.decode(*args ,**kwargs )
@contextmanager
    def as_target_processor(self ) -> Any:
        """simple docstring"""
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your audio inputs, or in a separate call.''' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
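# A minimal usage sketch for the processor defined above. The checkpoint name
# "facebook/s2t-small-librispeech-asr" is an assumption (any Speech2Text
# checkpoint that ships this processor behaves the same way).
import numpy as np
from transformers import Speech2TextProcessor

processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
waveform = np.zeros(16000, dtype=np.float32)  # one second of 16 kHz silence as a stand-in
inputs = processor(audio=waveform, sampling_rate=16000, return_tensors="pt")  # feature-extractor branch
labels = processor(text="hello world", return_tensors="pt")["input_ids"]     # tokenizer branch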
| 129 |
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotAudioClassificationPipeline( Pipeline ):
    '''simple docstring'''
    def __init__(self ,**kwargs ) -> Optional[Any]:
        """simple docstring"""
        super().__init__(**kwargs )
        if self.framework != "pt":
            raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
        # No specific FOR_XXX available yet
    def __call__(self ,audios ,**kwargs ) -> List[Any]:
        """simple docstring"""
        return super().__call__(audios ,**kwargs )
    def _sanitize_parameters(self ,**kwargs ) -> Any:
        """simple docstring"""
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['''candidate_labels'''] = kwargs['''candidate_labels''']
        if "hypothesis_template" in kwargs:
            preprocess_params['''hypothesis_template'''] = kwargs['''hypothesis_template''']
        return preprocess_params, {}, {}
    def preprocess(self ,audio ,candidate_labels=None ,hypothesis_template="This is a sound of {}." ) -> str:
        """simple docstring"""
        if isinstance(audio ,str ):
            if audio.startswith('''http://''' ) or audio.startswith('''https://''' ):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio ).content
            else:
                with open(audio ,'''rb''' ) as f:
                    audio = f.read()
        if isinstance(audio ,bytes ):
            audio = ffmpeg_read(audio ,self.feature_extractor.sampling_rate )
        if not isinstance(audio ,np.ndarray ):
            raise ValueError('''We expect a numpy ndarray as input''' )
        if len(audio.shape ) != 1:
            raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''' )
        inputs = self.feature_extractor(
            [audio] ,sampling_rate=self.feature_extractor.sampling_rate ,return_tensors='''pt''' )
        inputs['''candidate_labels'''] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences ,return_tensors=self.framework ,padding=True )
        inputs['''text_inputs'''] = [text_inputs]
        return inputs
    def _forward(self ,model_inputs ) -> Dict:
        """simple docstring"""
        candidate_labels = model_inputs.pop('''candidate_labels''' )
        text_inputs = model_inputs.pop('''text_inputs''' )
        if isinstance(text_inputs[0] ,UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs ,**model_inputs )
        model_outputs = {
            '''candidate_labels''': candidate_labels,
            '''logits''': outputs.logits_per_audio,
        }
        return model_outputs
    def postprocess(self ,model_outputs ) -> Optional[int]:
        """simple docstring"""
        candidate_labels = model_outputs.pop('''candidate_labels''' )
        logits = model_outputs['''logits'''][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=0 )
            scores = probs.tolist()
        else:
            raise ValueError('''`tf` framework not supported.''' )
        result = [
            {'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(scores ,candidate_labels ) ,key=lambda x : -x[0] )
        ]
        return result
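# Usage sketch for the pipeline above. "laion/clap-htsat-unfused" is an assumed
# example checkpoint; any CLAP-style audio-text model registered for this task
# should behave the same.
import numpy as np
from transformers import pipeline

classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
audio = np.zeros(48000, dtype=np.float32)  # stand-in single-channel waveform
preds = classifier(audio, candidate_labels=["dog barking", "vacuum cleaner"])
# -> list of {"score": ..., "label": ...} dicts sorted by descending score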
| 129 | 1 |
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput( BaseOutput ):
    '''simple docstring'''
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ):
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(f"""Unsupported alpha_transform_type: {alpha_transform_type}""" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
class DDIMInverseScheduler( SchedulerMixin , ConfigMixin ):
    '''simple docstring'''
    order = 1
@register_to_config
    def __init__( self , num_train_timesteps : int = 1000 , beta_start : float = 0.0001 , beta_end : float = 0.02 , beta_schedule : str = "linear" , trained_betas : Optional[Union[np.ndarray, List[float]]] = None , clip_sample : bool = True , set_alpha_to_zero : bool = True , steps_offset : int = 0 , prediction_type : str = "epsilon" , clip_sample_range : float = 1.0 , **kwargs , ):
        '''simple docstring'''
        if kwargs.get("set_alpha_to_one" , None ) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one" , "1.0.0" , deprecation_message , standard_warn=False )
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas , dtype=torch.float32 )
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start , beta_end , num_train_timesteps , dtype=torch.float32 )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , num_train_timesteps , dtype=torch.float32 ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps )
        else:
            raise NotImplementedError(F"""{beta_schedule} is not implemented for {self.__class__}""" )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0 , num_train_timesteps ).copy().astype(np.int64 ) )
    def scale_model_input( self , sample : torch.FloatTensor , timestep : Optional[int] = None ) -> torch.FloatTensor:
        '''simple docstring'''
        return sample
    def set_timesteps( self , num_inference_steps : int , device : Union[str, torch.device] = None ):
        '''simple docstring'''
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                F"""`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"""
                F""" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"""
                F""" maximal {self.config.num_train_timesteps} timesteps.""" )
        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round().copy().astype(np.int64 )
        self.timesteps = torch.from_numpy(timesteps ).to(device )
        self.timesteps += self.config.steps_offset
    def step( self , model_output : torch.FloatTensor , timestep : int , sample : torch.FloatTensor , eta : float = 0.0 , use_clipped_model_output : bool = False , variance_noise : Optional[torch.FloatTensor] = None , return_dict : bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
        '''simple docstring'''
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps
        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        beta_prod_t = 1 - alpha_prod_t
        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"""
                " `v_prediction`" )
        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range , self.config.clip_sample_range )
        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample , pred_original_sample=pred_original_sample )
    def __len__( self ) -> int:
'''simple docstring'''
return self.config.num_train_timesteps
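# Sketch of how the inverse scheduler above is typically driven: starting from a
# clean latent, each step() call walks the diffusion process *forward* in noise.
# `unet` is a hypothetical UNet2DModel matching the scheduler's training setup.
import torch

scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(50)
latents = torch.randn(1, 4, 32, 32)  # stand-in "clean" latent
for t in scheduler.timesteps:
    noise_pred = unet(latents, t).sample                        # hypothetical model call
    latents = scheduler.step(noise_pred, t, latents).prev_sample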
| 351 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)
class FlavaFeatureExtractor( FlavaImageProcessor ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ) -> None:
        '''simple docstring'''
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
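# Since the shim above only warns and delegates, the preferred modern spelling is
# to construct the image processor directly; "facebook/flava-full" is an assumed
# example checkpoint.
from transformers import FlavaImageProcessor

image_processor = FlavaImageProcessor.from_pretrained("facebook/flava-full")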
| 241 | 0 |
import requests
from bs4 import BeautifulSoup
def world_covidaa_stats( url: str = "https://www.worldometers.info/coronavirus" ):
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(url ).text , 'html.parser' )
    keys = soup.findAll('h1' )
    values = soup.findAll('div' , {'class': 'maincounter-number'} )
    keys += soup.findAll('span' , {'class': 'panel-title'} )
    values += soup.findAll('div' , {'class': 'number-table-main'} )
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
for key, value in world_covidaa_stats().items():
print(F'{key}\n{value}\n')
| 29 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_lock_timeout( tmpdir ):
    locka = FileLock(str(tmpdir / """foo.lock""" ) )
    lockb = FileLock(str(tmpdir / """foo.lock""" ) )
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lockb.acquire(timeout )
        assert time.time() - _start > timeout
def test_long_lock_filename( tmpdir ):
    filename = """a""" * 1_000 + """.lock"""
    locka = FileLock(str(tmpdir / filename ) )
    assert locka._lock_file.endswith(""".lock""" )
    assert not locka._lock_file.endswith(filename )
    assert len(os.path.basename(locka._lock_file ) ) <= 255
    lockb = FileLock(tmpdir / filename )
    with locka.acquire():
        with pytest.raises(Timeout ):
            lockb.acquire(0 )
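# The locking pattern these tests verify, as a standalone sketch: a second handle
# on the same lock file times out while the first handle holds the lock.
from datasets.utils.filelock import FileLock, Timeout

lock = FileLock("/tmp/demo.lock")
with lock.acquire(timeout=1):
    try:
        FileLock("/tmp/demo.lock").acquire(timeout=0.01)
    except Timeout:
        print("lock is held, as expected")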
| 35 | 0 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel( pl.LightningModule ):
    def __init__( self , model ):
        '''simple docstring'''
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size , self.num_labels )
    def forward( self ):
'''simple docstring'''
pass
def convert_longformer_qa_checkpoint_to_pytorch( longformer_model: str , longformer_question_answering_ckpt_path: str , pytorch_dump_folder_path: str ):
    """simple docstring"""
    longformer = LongformerModel.from_pretrained(longformer_model )
    lightning_model = LightningModel(longformer )
    ckpt = torch.load(longformer_question_answering_ckpt_path , map_location=torch.device('cpu' ) )
    lightning_model.load_state_dict(ckpt['state_dict'] )
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model )
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path )
    print(F'Conversion successful. Model saved under {pytorch_dump_folder_path}' )
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_lowerCAmelCase : Optional[int] = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
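# Direct invocation of the converter above, bypassing argparse; all three paths
# are placeholders for your local checkpoint and output locations.
convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model="allenai/longformer-base-4096",
    longformer_question_answering_ckpt_path="/path/to/lightning_checkpoint.ckpt",
    pytorch_dump_folder_path="/path/to/output_dir",
)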
| 308 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_lengths=True , use_token_type_ids=True , use_labels=True , gelu_activation=True , sinusoidal_embeddings=False , causal=False , asm=False , n_langs=2 , vocab_size=99 , n_special=0 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=2 , num_choices=4 , summary_type="last" , use_proj=True , scope=None , bos_token_id=0 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            is_impossible_labels = ids_tensor([self.batch_size] , 2 ).float()
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def get_config( self ):
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
    def create_and_check_xlm_model( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        '''simple docstring'''
        model = XLMModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , lengths=input_lengths , langs=token_type_ids )
        result = model(input_ids , langs=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_xlm_lm_head( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        '''simple docstring'''
        model = XLMWithLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_xlm_simple_qa( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        '''simple docstring'''
        model = XLMForQuestionAnsweringSimple(config )
        model.to(torch_device )
        model.eval()
        outputs = model(input_ids )
        outputs = model(input_ids , start_positions=sequence_labels , end_positions=sequence_labels )
        result = outputs
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_xlm_qa( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        '''simple docstring'''
        model = XLMForQuestionAnswering(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids )
        result_with_labels = model(
            input_ids , start_positions=sequence_labels , end_positions=sequence_labels , cls_index=sequence_labels , is_impossible=is_impossible_labels , p_mask=input_mask , )
        result_with_labels = model(
            input_ids , start_positions=sequence_labels , end_positions=sequence_labels , cls_index=sequence_labels , is_impossible=is_impossible_labels , )
        (total_loss , ) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids , start_positions=sequence_labels , end_positions=sequence_labels )
        (total_loss , ) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape , () )
        self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(
            result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(
            result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
    def create_and_check_xlm_sequence_classif( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        '''simple docstring'''
        model = XLMForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids )
        result = model(input_ids , labels=sequence_labels )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def create_and_check_xlm_token_classif( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_xlm_for_multiple_choice( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = XLMModelTester(self )
        self.config_tester = ConfigTester(self , config_class=XLMConfig , emb_dim=37 )
    def test_config( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
    def test_xlm_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs )
    def test_xlm_lm_head( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs )
    def test_xlm_simple_qa( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs )
    def test_xlm_qa( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs )
    def test_xlm_sequence_classif( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs )
    def test_xlm_token_classif( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs )
    def test_xlm_for_multiple_choice( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs )
    def _check_attentions_for_generate( self , batch_size , attentions , min_length , max_length , config , use_cache=False , num_beam_groups=1 ):
        '''simple docstring'''
        self.assertIsInstance(attentions , tuple )
        self.assertListEqual(
            [isinstance(iter_attentions , tuple ) for iter_attentions in attentions] , [True] * len(attentions ) )
        self.assertEqual(len(attentions ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_attentions in enumerate(attentions ):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1
            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(iter_attentions ) )
    def _check_hidden_states_for_generate( self , batch_size , hidden_states , min_length , max_length , config , use_cache=False , num_beam_groups=1 ):
        '''simple docstring'''
        self.assertIsInstance(hidden_states , tuple )
        self.assertListEqual(
            [isinstance(iter_hidden_states , tuple ) for iter_hidden_states in hidden_states] , [True] * len(hidden_states ) , )
        self.assertEqual(len(hidden_states ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_hidden_states in enumerate(hidden_states ):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(iter_hidden_states ) , )
pass
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class XLMModelLanguageGenerationTest( unittest.TestCase ):
@slow
    def test_lm_generate_xlm_mlm_en_2048( self ):
        '''simple docstring'''
        model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
        model.to(torch_device )
        input_ids = torch.tensor([[14, 447]] , dtype=torch.long , device=torch_device )  # the president
        expected_output_ids = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].cpu().numpy().tolist() , expected_output_ids )
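# Standalone version of the integration test above: greedy generation from the
# prompt "the president" (slow; downloads the full checkpoint).
import torch
from transformers import XLMTokenizer, XLMWithLMHeadModel

tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
input_ids = torch.tensor([[14, 447]])  # "the president", same ids as the test
output_ids = model.generate(input_ids, do_sample=False)
print(tokenizer.decode(output_ids[0]))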
| 308 | 1 |
"""simple docstring"""
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("path" , ["paws", "csv"] )
def test_inspect_dataset( path , tmp_path ):
    '''simple docstring'''
    inspect_dataset(path , tmp_path )
    script_name = path + """.py"""
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.parametrize("path" , ["accuracy"] )
def test_inspect_metric( path , tmp_path ):
    '''simple docstring'''
    inspect_metric(path , tmp_path )
    script_name = path + """.py"""
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.parametrize(
"path, config_name, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def test_get_dataset_config_info( path , config_name , expected_splits ):
    '''simple docstring'''
    info = get_dataset_config_info(path , config_name=config_name )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def test_get_dataset_config_info_error( path , config_name , expected_exception ):
    '''simple docstring'''
    with pytest.raises(expected_exception ):
        get_dataset_config_info(path , config_name=config_name )
@pytest.mark.parametrize(
"path, expected" , [
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
] , )
def test_get_dataset_config_names( path , expected ):
    '''simple docstring'''
    config_names = get_dataset_config_names(path )
    assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config" , [
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
] , )
def test_get_dataset_infos( path , expected_configs , expected_splits_in_first_config ):
    '''simple docstring'''
    infos = get_dataset_infos(path )
    assert list(infos.keys() ) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def test_get_dataset_info( path , expected_config , expected_splits ):
    '''simple docstring'''
    infos = get_dataset_infos(path )
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def test_get_dataset_split_names_error( path , config_name , expected_exception ):
    '''simple docstring'''
    with pytest.raises(expected_exception ):
        get_dataset_split_names(path , config_name=config_name )
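# The same inspection API these tests cover, called directly:
from datasets import get_dataset_config_names, get_dataset_split_names

print(get_dataset_config_names("paws"))                # ['labeled_final', 'labeled_swap', 'unlabeled_final']
print(get_dataset_split_names("squad", "plain_text"))  # ['train', 'validation']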
| 105 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem( AbstractFileSystem ):
    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs
    def __init__( self , repo_info: Optional[DatasetInfo] = None , token: Optional[str] = None , **kwargs , ):
        super().__init__(self , **kwargs )
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None
    def _get_dirs( self ):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    """name""": hf_file.rfilename,
                    """size""": None,
                    """type""": """file""",
                }
                self.dir_cache.update(
                    {
                        str(d ): {"""name""": str(d ), """size""": None, """type""": """directory"""}
                        for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
                    } )
    def _open( self , path , mode = "rb" , **kwargs , ):
        if not isinstance(self.repo_info , DatasetInfo ):
            raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
        url = hf_hub_url(self.repo_info.id , path , revision=self.repo_info.sha )
        return fsspec.open(
            url , mode=mode , headers=get_authentication_headers_for_url(url , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
    def info( self , path , **kwargs ):
        self._get_dirs()
        path = self._strip_protocol(path )
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path )
    def ls( self , path , detail=False , **kwargs ):
        self._get_dirs()
        path = PurePosixPath(path.strip("""/""" ) )
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("""/""" ) )
            root = p.parent
            if root == path:
                paths[str(p )] = f
        out = list(paths.values() )
        if detail:
            return out
        else:
            return sorted(f["""name"""] for f in out )
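# Usage sketch: the filesystem above wraps a DatasetInfo fetched from the Hub;
# "squad" is just an example repo id.
from huggingface_hub import HfApi

repo_info = HfApi().dataset_info("squad")
fs = HfFileSystem(repo_info=repo_info)
print(fs.ls(""))                 # top-level files of the repo
with fs.open("README.md") as f:  # assumes the repo has a README.md
    print(f.read(200))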
| 44 | 0 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/conditional-detr-resnet-50': (
'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
),
}
class ConditionalDetrConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''conditional_detr'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=300 , encoder_layers=6 , encoder_ffn_dim=2048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=2 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , cls_loss_coefficient=2 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , focal_alpha=0.25 , **kwargs , ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
                backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get("""model_type""" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads( self ):
        return self.encoder_attention_heads
    @property
    def hidden_size( self ):
        return self.d_model
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output["""backbone_config"""] = self.backbone_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse('''1.11''' )
    @property
    def inputs( self ):
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
                ("""pixel_mask""", {0: """batch"""}),
            ] )
    @property
    def atol_for_validation( self ):
        return 1e-5
    @property
    def default_onnx_opset( self ):
        return 12
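# Quick sketch instantiating the config above; `attribute_map` makes
# `hidden_size` an alias for `d_model` and `num_attention_heads` an alias for
# `encoder_attention_heads`.
config = ConditionalDetrConfig(num_queries=100, d_model=128)
assert config.hidden_size == 128
print(config.num_attention_heads)  # 8, the encoder_attention_heads default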
| 368 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name: str , num_meta4D_last_stage: int ) -> str:
    new_name = old_name
    if "patch_embed" in old_name:
        _ , layer , param = old_name.split(""".""" )
        if layer == "0":
            new_name = old_name.replace("""0""" , """convolution1""" )
        elif layer == "1":
            new_name = old_name.replace("""1""" , """batchnorm_before""" )
        elif layer == "3":
            new_name = old_name.replace("""3""" , """convolution2""" )
        else:
            new_name = old_name.replace("""4""" , """batchnorm_after""" )
    if "network" in old_name and re.search(r"""\d\.\d""" , old_name ):
        two_digit_num = r"""\b\d{2}\b"""
        if bool(re.search(two_digit_num , old_name ) ):
            match = re.search(r"""\d\.\d\d.""" , old_name ).group()
        else:
            match = re.search(r"""\d\.\d.""" , old_name ).group()
        if int(match[0] ) < 6:
            trimmed_name = old_name.replace(match , """""" )
            trimmed_name = trimmed_name.replace("""network""" , match[0] + """.meta4D_layers.blocks.""" + match[2:-1] )
            new_name = """intermediate_stages.""" + trimmed_name
        else:
            trimmed_name = old_name.replace(match , """""" )
            if int(match[2] ) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("""network""" , """meta4D_layers.blocks.""" + match[2] )
            else:
                layer_index = str(int(match[2] ) - num_meta4D_last_stage )
                trimmed_name = trimmed_name.replace("""network""" , """meta3D_layers.blocks.""" + layer_index )
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("""norm1""" , """layernorm1""" )
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("""norm2""" , """layernorm2""" )
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("""fc1""" , """linear_in""" )
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("""fc2""" , """linear_out""" )
            new_name = """last_stage.""" + trimmed_name
    elif "network" in old_name and re.search(r""".\d.""" , old_name ):
        new_name = old_name.replace("""network""" , """intermediate_stages""" )
    if "fc" in new_name:
        new_name = new_name.replace("""fc""" , """convolution""" )
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("""norm1""" , """batchnorm_before""" )
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("""norm2""" , """batchnorm_after""" )
    if "proj" in new_name:
        new_name = new_name.replace("""proj""" , """projection""" )
    if "dist_head" in new_name:
        new_name = new_name.replace("""dist_head""" , """distillation_classifier""" )
    elif "head" in new_name:
        new_name = new_name.replace("""head""" , """classifier""" )
    elif "patch_embed" in new_name:
        new_name = """efficientformer.""" + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("""norm""" , """layernorm""" )
        new_name = """efficientformer.""" + new_name
    else:
        new_name = """efficientformer.encoder.""" + new_name
    return new_name
def convert_torch_checkpoint(checkpoint , num_meta4D_last_stage ):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key )
        checkpoint[rename_key(key , num_meta4D_last_stage )] = val
    return checkpoint
def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
def convert_efficientformer_checkpoint(checkpoint_path: Path , efficientformer_config_file: Path , pytorch_dump_path: Path , push_to_hub: bool ):
    orig_state_dict = torch.load(checkpoint_path , map_location="""cpu""" )["""model"""]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file )
    model = EfficientFormerForImageClassificationWithTeacher(config )
    model_name = """_""".join(checkpoint_path.split("""/""" )[-1].split(""".""" )[0].split("""_""" )[:-1] )
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict , num_meta4D_last_stage )
    model.load_state_dict(new_state_dict )
    model.eval()
    pillow_resamplings = {
        """bilinear""": PILImageResampling.BILINEAR,
        """bicubic""": PILImageResampling.BICUBIC,
        """nearest""": PILImageResampling.NEAREST,
    }
    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"""shortest_edge""": image_size} , crop_size={"""height""": crop_size, """width""": crop_size} , resample=pillow_resamplings["""bicubic"""] , )
    pixel_values = processor(images=image , return_tensors="""pt""" ).pixel_values
    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size , interpolation=pillow_resamplings["""bicubic"""] ),
            CenterCrop(crop_size ),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN , IMAGENET_DEFAULT_STD ),
        ] )
    original_pixel_values = image_transforms(image ).unsqueeze(0 )
    assert torch.allclose(original_pixel_values , pixel_values )
    outputs = model(pixel_values )
    logits = outputs.logits
    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
        assert torch.allclose(logits[0, :10] , expected_logits , atol=1E-3 )
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
        assert torch.allclose(logits[0, :10] , expected_logits , atol=1E-3 )
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )
    # Save Checkpoints
    Path(pytorch_dump_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_path )
    print(f"""Checkpoint successfully converted. Model saved at {pytorch_dump_path}""" )
    processor.save_pretrained(pytorch_dump_path )
    print(f"""Processor successfully saved at {pytorch_dump_path}""" )
    if push_to_hub:
        print("""Pushing model to the hub...""" )
        model.push_to_hub(
            repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message="""Add model""" , use_temp_dir=True , )
        processor.push_to_hub(
            repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message="""Add image processor""" , use_temp_dir=True , )
if __name__ == "__main__":
A_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path',
default=None,
type=str,
required=True,
help='Path to EfficientFormer pytorch checkpoint.',
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for EfficientFormer model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
parser.set_defaults(push_to_hub=True)
A_ : List[Any] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
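# Equivalent direct call to the CLI entry point above; the three paths are
# placeholders for your local files.
convert_efficientformer_checkpoint(
    checkpoint_path="weights/efficientformer_l1_300d.pth",
    efficientformer_config_file="configs/efficientformer_l1.json",
    pytorch_dump_path="efficientformer-l1-converted",
    push_to_hub=False,
)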
| 141 | 0 |
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor( SequenceFeatureExtractor ):
    model_input_names = ["""audio_values""", """audio_mask"""]
    def __init__( self , spectrogram_length=2048 , num_channels=1 , patch_size=[16, 16] , feature_size=128 , sampling_rate=44100 , hop_length_to_sampling_rate=86 , n_fft=2048 , padding_value=0.0 , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs , )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=22050.0 , sampling_rate=sampling_rate , norm="""slaney""" , mel_scale="""slaney""" , ).T
    def _np_extract_fbank_features( self , waveform ) -> np.ndarray:
        '''simple docstring'''
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
        return log_spec
def __call__( self , _A , _A = None , _A = True , _A = None , _A = False , _A = False , **_A , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"""This feature extractor is set to support sampling rate"""
f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
f''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
_UpperCAmelCase : str = isinstance(_A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
_UpperCAmelCase : Dict = is_batched_numpy or (
isinstance(_A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_UpperCAmelCase : Optional[Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_A , np.ndarray ):
_UpperCAmelCase : str = np.asarray(_A , dtype=np.floataa )
elif isinstance(_A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_UpperCAmelCase : Dict = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_UpperCAmelCase : List[Any] = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
_UpperCAmelCase : List[Any] = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , _A ):
_UpperCAmelCase : List[str] = [np.asarray(_A , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
_UpperCAmelCase : Union[str, Any] = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
_UpperCAmelCase : Dict = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
_UpperCAmelCase : int = np.array(_A ).astype(np.floataa )
# convert into correct format for padding
_UpperCAmelCase : List[Any] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
_UpperCAmelCase : List[str] = np.ones([len(_A ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
_UpperCAmelCase : int = padded_audio_features * self.padding_value
for i in range(len(_A ) ):
_UpperCAmelCase : Union[str, Any] = audio_features[i]
_UpperCAmelCase : int = feature
# return as BatchFeature
if return_attention_mask:
_UpperCAmelCase : Tuple = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
else:
_UpperCAmelCase : Any = {"""audio_values""": padded_audio_features}
_UpperCAmelCase : List[str] = BatchFeature(data=_A , tensor_type=_A )
return encoded_inputs
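# Minimal smoke test for the feature extractor above (synthetic one-second
# 440 Hz tone; shape comments assume the defaults of 44.1 kHz sampling,
# hop 512, 128 mel bins, and 16x16 patches):
if __name__ == "__main__":
    tone = np.sin(2 * np.pi * 440.0 * np.arange(44100) / 44100).astype(np.float32)
    out = TvltFeatureExtractor()(tone, sampling_rate=44100, return_tensors="np")
    print(out["audio_values"].shape, out["audio_mask"].shape)  # e.g. (1, 1, 96, 128) (1, 48)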
| 246 |
"""simple docstring"""
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"


def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
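# Worked example: _dense_to_one_hot(numpy.array([0, 2, 1]), num_classes=3)
# returns
#   [[1., 0., 0.],
#    [0., 0., 1.],
#    [0., 1., 0.]]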
@deprecated(_lowerCAmelCase, """Please use tf.data to implement this functionality.""" )
def UpperCamelCase ( _lowerCAmelCase : Optional[int], _lowerCAmelCase : Optional[int]=False, _lowerCAmelCase : Optional[Any]=10 ) -> Union[str, Any]:
print("""Extracting""", f.name )
with gzip.GzipFile(fileobj=_lowerCAmelCase ) as bytestream:
_UpperCAmelCase : Tuple = _readaa(_lowerCAmelCase )
if magic != 2049:
raise ValueError(
"""Invalid magic number %d in MNIST label file: %s""" % (magic, f.name) )
_UpperCAmelCase : str = _readaa(_lowerCAmelCase )
_UpperCAmelCase : Dict = bytestream.read(_lowerCAmelCase )
_UpperCAmelCase : Optional[Any] = numpy.frombuffer(_lowerCAmelCase, dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(_lowerCAmelCase, _lowerCAmelCase )
return labels
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(_lowerCAmelCase, """Please write your own downloading logic.""" )
def UpperCamelCase ( _lowerCAmelCase : int, _lowerCAmelCase : List[Any], _lowerCAmelCase : Optional[Any] ) -> Union[str, Any]:
if not gfile.Exists(_lowerCAmelCase ):
gfile.MakeDirs(_lowerCAmelCase )
_UpperCAmelCase : Optional[int] = os.path.join(_lowerCAmelCase, _lowerCAmelCase )
if not gfile.Exists(_lowerCAmelCase ):
urllib.request.urlretrieve(_lowerCAmelCase, _lowerCAmelCase ) # noqa: S310
with gfile.GFile(_lowerCAmelCase ) as f:
_UpperCAmelCase : Optional[int] = f.size()
print("""Successfully downloaded""", _lowerCAmelCase, _lowerCAmelCase, """bytes.""" )
return filepath
@deprecated(
_lowerCAmelCase, """Please use alternatives such as:""" """ tensorflow_datasets.load('mnist')""" )
def UpperCamelCase ( _lowerCAmelCase : Tuple, _lowerCAmelCase : str=False, _lowerCAmelCase : List[str]=False, _lowerCAmelCase : Tuple=dtypes.floataa, _lowerCAmelCase : List[str]=True, _lowerCAmelCase : Union[str, Any]=5000, _lowerCAmelCase : Optional[Any]=None, _lowerCAmelCase : int=DEFAULT_SOURCE_URL, ) -> Optional[Any]:
if fake_data:
def fake():
return _DataSet(
[], [], fake_data=_lowerCAmelCase, one_hot=_lowerCAmelCase, dtype=_lowerCAmelCase, seed=_lowerCAmelCase )
_UpperCAmelCase : List[Any] = fake()
_UpperCAmelCase : int = fake()
_UpperCAmelCase : Any = fake()
return _Datasets(train=_lowerCAmelCase, validation=_lowerCAmelCase, test=_lowerCAmelCase )
if not source_url: # empty string check
_UpperCAmelCase : Optional[Any] = DEFAULT_SOURCE_URL
_UpperCAmelCase : Optional[int] = """train-images-idx3-ubyte.gz"""
_UpperCAmelCase : int = """train-labels-idx1-ubyte.gz"""
_UpperCAmelCase : Optional[Any] = """t10k-images-idx3-ubyte.gz"""
_UpperCAmelCase : Tuple = """t10k-labels-idx1-ubyte.gz"""
_UpperCAmelCase : Tuple = _maybe_download(
_lowerCAmelCase, _lowerCAmelCase, source_url + train_images_file )
with gfile.Open(_lowerCAmelCase, """rb""" ) as f:
_UpperCAmelCase : Optional[int] = _extract_images(_lowerCAmelCase )
_UpperCAmelCase : Any = _maybe_download(
_lowerCAmelCase, _lowerCAmelCase, source_url + train_labels_file )
with gfile.Open(_lowerCAmelCase, """rb""" ) as f:
_UpperCAmelCase : Optional[int] = _extract_labels(_lowerCAmelCase, one_hot=_lowerCAmelCase )
_UpperCAmelCase : Optional[int] = _maybe_download(
_lowerCAmelCase, _lowerCAmelCase, source_url + test_images_file )
with gfile.Open(_lowerCAmelCase, """rb""" ) as f:
_UpperCAmelCase : Union[str, Any] = _extract_images(_lowerCAmelCase )
_UpperCAmelCase : Optional[int] = _maybe_download(
_lowerCAmelCase, _lowerCAmelCase, source_url + test_labels_file )
with gfile.Open(_lowerCAmelCase, """rb""" ) as f:
_UpperCAmelCase : List[Any] = _extract_labels(_lowerCAmelCase, one_hot=_lowerCAmelCase )
if not 0 <= validation_size <= len(_lowerCAmelCase ):
_UpperCAmelCase : int = (
"""Validation size should be between 0 and """
f'''{len(_lowerCAmelCase )}. Received: {validation_size}.'''
)
raise ValueError(_lowerCAmelCase )
_UpperCAmelCase : str = train_images[:validation_size]
_UpperCAmelCase : Union[str, Any] = train_labels[:validation_size]
_UpperCAmelCase : Optional[Any] = train_images[validation_size:]
_UpperCAmelCase : Optional[int] = train_labels[validation_size:]
_UpperCAmelCase : Optional[int] = {"""dtype""": dtype, """reshape""": reshape, """seed""": seed}
_UpperCAmelCase : Tuple = _DataSet(_lowerCAmelCase, _lowerCAmelCase, **_lowerCAmelCase )
_UpperCAmelCase : Dict = _DataSet(_lowerCAmelCase, _lowerCAmelCase, **_lowerCAmelCase )
_UpperCAmelCase : List[Any] = _DataSet(_lowerCAmelCase, _lowerCAmelCase, **_lowerCAmelCase )
return _Datasets(train=_lowerCAmelCase, validation=_lowerCAmelCase, test=_lowerCAmelCase )
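# Quick smoke test via the fake-data path (no files or network needed):
if __name__ == "__main__":
    mnist = read_data_sets("/tmp/mnist-data", fake_data=True, one_hot=True)
    batch_xs, batch_ys = mnist.train.next_batch(8, fake_data=True)
    print(len(batch_xs), len(batch_ys))  # 8 8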
| 246 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
"tokenization_perceiver": ["PerceiverTokenizer"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ["PerceiverFeatureExtractor"]
UpperCamelCase__ = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
"PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PerceiverForImageClassificationConvProcessing",
"PerceiverForImageClassificationFourier",
"PerceiverForImageClassificationLearned",
"PerceiverForMaskedLM",
"PerceiverForMultimodalAutoencoding",
"PerceiverForOpticalFlow",
"PerceiverForSequenceClassification",
"PerceiverLayer",
"PerceiverModel",
"PerceiverPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
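# The deferred-import idea in miniature (standalone sketch using PEP 562
# module-level __getattr__; independent of the `_LazyModule` helper above):
#
#   # mypkg/__init__.py
#   _import_structure = {"heavy": ["big_function"]}
#
#   def __getattr__(name):
#       import importlib
#       for module_name, symbols in _import_structure.items():
#           if name in symbols:
#               return getattr(importlib.import_module("." + module_name, __name__), name)
#       raise AttributeError(name)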
| 353 |
UNIT_SYMBOL = {
"meter": "m",
"kilometer": "km",
"megametre": "Mm",
"gigametre": "Gm",
"terametre": "Tm",
"petametre": "Pm",
"exametre": "Em",
"zettametre": "Zm",
"yottametre": "Ym",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
"m": 0,
"km": 3,
"Mm": 6,
"Gm": 9,
"Tm": 12,
"Pm": 15,
"Em": 18,
"Zm": 21,
"Ym": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert between metric length units, e.g. meters to kilometers."""
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
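    # Worked examples (exponents from METRIC_CONVERSION: m=0, km=3):
    print(length_conversion(1, "meter", "kilometer"))    # 1 * 10**-3 -> 0.001
    print(length_conversion(4, "kilometers", "meters"))  # 4 * 10**3  -> 4000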
| 87 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
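# Usage sketch: point the template at a dataset whose raw text lives in a
# custom column; `column_mapping` then renames it to the canonical "text".
if __name__ == "__main__":
    task = LanguageModeling(text_column="content")
    print(task.column_mapping)  # {'content': 'text'}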
| 119 |
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    # Boxes are rescaled into the 0-1000 range used by LayoutLM-style models.
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image, lang, tesseract_config=None):
    """Applies Tesseract OCR on a document image and returns recognized words plus normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes


class LayoutLMv2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        apply_ocr=True,
        ocr_lang=None,
        tesseract_config="",
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        apply_ocr=None,
        ocr_lang=None,
        tesseract_config=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
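# Minimal usage sketch (synthetic image; apply_ocr=False sidesteps the
# pytesseract system dependency):
if __name__ == "__main__":
    processor = LayoutLMv2ImageProcessor(apply_ocr=False)
    pixel_values = processor(np.zeros((40, 60, 3), dtype=np.uint8), return_tensors="np")["pixel_values"]
    print(pixel_values.shape)  # (1, 3, 224, 224)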
| 128 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[Any] = ["""GLPNFeatureExtractor"""]
_lowercase : Optional[Any] = ["""GLPNImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
"""GLPN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GLPNForDepthEstimation""",
"""GLPNLayer""",
"""GLPNModel""",
"""GLPNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 91 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_poolformer""": [
"""POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""PoolFormerConfig""",
"""PoolFormerOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[Any] = ["""PoolFormerFeatureExtractor"""]
_lowercase : Any = ["""PoolFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
"""POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PoolFormerForImageClassification""",
"""PoolFormerModel""",
"""PoolFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 91 | 1 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__a =self.get_tokenizer()
__a =self.get_rust_tokenizer()
__a ='UNwant\u00E9d,running'
__a =tokenizer.tokenize(__snake_case )
__a =rust_tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
__a =tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
__a =rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
__a =self.get_rust_tokenizer()
__a =tokenizer.encode(__snake_case )
__a =rust_tokenizer.encode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
# With lower casing
__a =self.get_tokenizer(do_lower_case=__snake_case )
__a =self.get_rust_tokenizer(do_lower_case=__snake_case )
__a ='UNwant\u00E9d,running'
__a =tokenizer.tokenize(__snake_case )
__a =rust_tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
__a =tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
__a =rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
__a =self.get_rust_tokenizer()
__a =tokenizer.encode(__snake_case )
__a =rust_tokenizer.encode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =BasicTokenizer(do_lower_case=__snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =BasicTokenizer(do_lower_case=__snake_case , strip_accents=__snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
__a =BasicTokenizer(do_lower_case=__snake_case , strip_accents=__snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =BasicTokenizer(do_lower_case=__snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =BasicTokenizer(do_lower_case=__snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =BasicTokenizer(do_lower_case=__snake_case , strip_accents=__snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =BasicTokenizer(do_lower_case=__snake_case , strip_accents=__snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a =BasicTokenizer(do_lower_case=__snake_case , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__a ={}
for i, token in enumerate(__snake_case ):
__a =i
__a =WordpieceTokenizer(vocab=__snake_case , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
__a =self.get_tokenizer()
__a =self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__snake_case ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
self.assertListEqual(
[rust_tokenizer.tokenize(__snake_case ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
@slow
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a =self.tokenizer_class.from_pretrained('google/mobilebert-uncased' )
__a =tokenizer.encode('sequence builders' , add_special_tokens=__snake_case )
__a =tokenizer.encode('multi-sequence build' , add_special_tokens=__snake_case )
__a =tokenizer.build_inputs_with_special_tokens(__snake_case )
__a =tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__a =self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
__a =f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
__a =tokenizer_r.encode_plus(
__snake_case , return_attention_mask=__snake_case , return_token_type_ids=__snake_case , return_offsets_mapping=__snake_case , add_special_tokens=__snake_case , )
__a =tokenizer_r.do_lower_case if hasattr(__snake_case , 'do_lower_case' ) else False
__a =(
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =['的', '人', '有']
__a =''.join(__snake_case )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__a =True
__a =self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
__a =self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
__a =tokenizer_p.encode(__snake_case , add_special_tokens=__snake_case )
__a =tokenizer_r.encode(__snake_case , add_special_tokens=__snake_case )
__a =tokenizer_r.convert_ids_to_tokens(__snake_case )
__a =tokenizer_p.convert_ids_to_tokens(__snake_case )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__snake_case , __snake_case )
self.assertListEqual(__snake_case , __snake_case )
__a =False
__a =self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
__a =self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
__a =tokenizer_r.encode(__snake_case , add_special_tokens=__snake_case )
__a =tokenizer_p.encode(__snake_case , add_special_tokens=__snake_case )
__a =tokenizer_r.convert_ids_to_tokens(__snake_case )
__a =tokenizer_p.convert_ids_to_tokens(__snake_case )
# it is expected that only the first Chinese character is not preceded by "##".
__a =[
f'##{token}' if idx != 0 else token for idx, token in enumerate(__snake_case )
]
self.assertListEqual(__snake_case , __snake_case )
self.assertListEqual(__snake_case , __snake_case )
| 218 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    test_slow_tokenizer = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a =RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
__a =RoCBertBasicTokenizer(do_lower_case=__snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
__a =RoCBertBasicTokenizer(do_lower_case=__snake_case , strip_accents=__snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
__a =RoCBertBasicTokenizer(do_lower_case=__snake_case , strip_accents=__snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =RoCBertBasicTokenizer(do_lower_case=__snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
__a =RoCBertBasicTokenizer(do_lower_case=__snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a =RoCBertBasicTokenizer(do_lower_case=__snake_case , strip_accents=__snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =RoCBertBasicTokenizer(do_lower_case=__snake_case , strip_accents=__snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =RoCBertBasicTokenizer(do_lower_case=__snake_case , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__a ={}
for i, token in enumerate(__snake_case ):
__a =i
__a =RoCBertWordpieceTokenizer(vocab=__snake_case , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def __magic_name__ ( self ) -> str:
'''simple docstring'''
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a =self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__snake_case ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
if self.test_rust_tokenizer:
__a =self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(__snake_case ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__a =self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
__a =f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
__a =tokenizer_r.encode_plus(
__snake_case , return_attention_mask=__snake_case , return_token_type_ids=__snake_case , return_offsets_mapping=__snake_case , add_special_tokens=__snake_case , )
__a =tokenizer_r.do_lower_case if hasattr(__snake_case , 'do_lower_case' ) else False
__a =(
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =['的', '人', '有']
__a =''.join(__snake_case )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__a =True
__a =self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
__a =self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
__a =tokenizer_p.encode(__snake_case , add_special_tokens=__snake_case )
__a =tokenizer_r.encode(__snake_case , add_special_tokens=__snake_case )
__a =tokenizer_r.convert_ids_to_tokens(__snake_case )
__a =tokenizer_p.convert_ids_to_tokens(__snake_case )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__snake_case , __snake_case )
self.assertListEqual(__snake_case , __snake_case )
__a =False
__a =self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
__a =self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
__a =tokenizer_r.encode(__snake_case , add_special_tokens=__snake_case )
__a =tokenizer_p.encode(__snake_case , add_special_tokens=__snake_case )
__a =tokenizer_r.convert_ids_to_tokens(__snake_case )
__a =tokenizer_p.convert_ids_to_tokens(__snake_case )
# it is expected that only the first Chinese character is not preceded by "##".
__a =[
f'##{token}' if idx != 0 else token for idx, token in enumerate(__snake_case )
]
self.assertListEqual(__snake_case , __snake_case )
self.assertListEqual(__snake_case , __snake_case )
@slow
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__a =tokenizer.encode('你好' , add_special_tokens=__snake_case )
__a =tokenizer.encode('你是谁' , add_special_tokens=__snake_case )
__a =tokenizer.build_inputs_with_special_tokens(__snake_case )
__a =tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =self.get_tokenizers(do_lower_case=__snake_case )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
__a ='你好,你是谁'
__a =tokenizer.tokenize(__snake_case )
__a =tokenizer.convert_tokens_to_ids(__snake_case )
__a =tokenizer.convert_tokens_to_shape_ids(__snake_case )
__a =tokenizer.convert_tokens_to_pronunciation_ids(__snake_case )
__a =tokenizer.prepare_for_model(
__snake_case , __snake_case , __snake_case , add_special_tokens=__snake_case )
__a =tokenizer.encode_plus(__snake_case , add_special_tokens=__snake_case )
self.assertEqual(__snake_case , __snake_case )
| 218 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
| 368 |
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
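# Behavior sketch: importing the dummy is harmless; instantiating it is what
# raises, pointing the user at the missing backend (this is a no-op if
# keras_nlp happens to be installed):
if __name__ == "__main__":
    try:
        TFGPT2Tokenizer()
    except ImportError as err:  # raised by requires_backends when keras_nlp is absent
        print(err)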
| 208 | 0 |
import os


def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product

    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)


if __name__ == "__main__":
    print(solution())
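    # Sanity check on a toy 4x4 grid: the best run is the main diagonal,
    # 1 * 2 * 3 * 4, so largest_product should return 24.
    assert largest_product([[1, 0, 0, 0], [0, 2, 0, 0], [0, 0, 3, 0], [0, 0, 0, 4]]) == 24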
| 188 |
from ..utils import DummyObject, requires_backends
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
def UpperCAmelCase__ ( *_A : Optional[Any] , **_A : Optional[Any] ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : Union[str, Any] , **_A : List[Any] ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : Union[str, Any] , **_A : Tuple ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : List[str] , **_A : List[str] ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : Dict , **_A : Dict ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : List[str] , **_A : str ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : Optional[int] , **_A : Dict ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : str = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Any = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : str = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : str = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : str = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : str = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Any = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
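# The classes above are auto-generated backend placeholders; their original
# names were stripped in this dump (they all render as `__magic_name__`), so
# they are left as-is. A hedged, minimal sketch of the underlying pattern
# (my own reconstruction, not the library's code): a metaclass that fails
# loudly when the missing backend is touched.
class _DummyObjectSketch(type):
    def __getattr__(cls, key):
        # Any attribute access on the class (e.g. `_SketchModel.from_pretrained`)
        # raises a clear ImportError instead of an opaque AttributeError.
        raise ImportError(f"{cls.__name__} requires the PyTorch backend (`pip install torch`).")


class _SketchModel(metaclass=_DummyObjectSketch):
    pass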
| 188 | 1 |
'''simple docstring'''
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
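# Hedged numeric check of the two helpers above; the sample logits and the
# expected values are my own illustration, not part of the original module.
if __name__ == "__main__":
    assert np.isclose(sigmoid(np.array([0.0]))[0], 0.5)
    assert np.allclose(softmax(np.array([1.0, 2.0, 3.0])), [0.09003, 0.24473, 0.66524], atol=1e-4)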
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
PIPELINE_INIT_ARGS , R'''
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `\"default\"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `\"sigmoid\"`: Applies the sigmoid function on the output.
- `\"softmax\"`: Applies the softmax function on the output.
- `\"none\"`: Does not apply any function on the output.
''' , )
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores
        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1
        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]
        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs):
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                " dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair."
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE
        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")
        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
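# Hedged usage sketch of the pipeline defined above, via the high-level
# factory (the model choice and input text are illustrative; weights are
# downloaded on first use):
#   from transformers import pipeline
#   classifier = pipeline("text-classification")
#   classifier("This is great!")               # -> [{"label": ..., "score": ...}]
#   classifier("This is great!", top_k=None)   # scores for every label, sorted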
| 351 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
    def __init__(self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=36, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1000, ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMvaModel(config=config)
        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, training=False, )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))
        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, training=False, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, training=False, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, training=False, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32)
        return inputs_dict
    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))
                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())
                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)
                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_model_various_embeddings(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_for_sequence_classification(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)
    def test_for_token_classification(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels)
    def test_for_question_answering(self):
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values
        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 13 | 0 |
'''simple docstring'''
def prefix_function(input_string):
    """
    KMP prefix function: prefix_result[i] is the length of the longest proper
    prefix of input_string[: i + 1] that is also a suffix of it.
    """
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_string):
    """Return the length of the longest border found anywhere in the string."""
    return max(prefix_function(input_string))
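# Hand-checked example (the sample string and expected values are my own
# illustration, not from the original file):
#   prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4]
#   longest_prefix("aabcdaabc") == 4   # the border "aabc" is both a prefix and a suffix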
if __name__ == "__main__":
import doctest
doctest.testmod()
| 311 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq
    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
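# Hedged usage sketch (the image path is illustrative; calling the tool
# downloads the BLIP checkpoint named in `default_checkpoint` on first use):
#   from PIL import Image
#   tool = ImageCaptioningTool()
#   caption = tool(Image.open("photo.jpg"))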
| 219 | 0 |
"""simple docstring"""
__all__ = [
'''Audio''',
'''Array2D''',
'''Array3D''',
'''Array4D''',
'''Array5D''',
'''ClassLabel''',
'''Features''',
'''Sequence''',
'''Value''',
'''Image''',
'''Translation''',
'''TranslationVariableLanguages''',
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
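# Hedged usage sketch of the exported feature types (the column names and
# class labels are my own illustration):
#   from datasets import Features, Value, ClassLabel
#   features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})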
| 369 | """simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f'Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})' )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f'Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})' )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
_torch_error = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''')
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
_tf_error = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''')
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
_jax_error = ValueError('''JAX needs to be installed to be able to return JAX arrays.''')
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f'Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'' )
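# Hedged usage sketch ("np" is one of the aliases registered above; the
# failure case assumes the corresponding backend is not installed):
#   formatter = get_formatter("np")      # -> NumpyFormatter instance
#   formatter = get_formatter("torch")   # raises _torch_error if PyTorch is absent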
| 312 | 0 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
def __call__( self : Optional[Any], *UpperCAmelCase__ : int, **UpperCAmelCase__ : Union[str, Any] ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*UpperCAmelCase__, **UpperCAmelCase__ )
__lowercase = kwargs.pop("images", UpperCAmelCase__ )
__lowercase = kwargs.pop("text", UpperCAmelCase__ )
if len(UpperCAmelCase__ ) > 0:
__lowercase = args[0]
__lowercase = args[1:]
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
__lowercase = self.image_processor(UpperCAmelCase__, *UpperCAmelCase__, **UpperCAmelCase__ )
if text is not None:
__lowercase = self.tokenizer(UpperCAmelCase__, **UpperCAmelCase__ )
if text is None:
return inputs
elif images is None:
return encodings
else:
__lowercase = encodings["input_ids"]
return inputs
def _lowercase ( self : Union[str, Any], *UpperCAmelCase__ : int, **UpperCAmelCase__ : int ):
return self.tokenizer.batch_decode(*UpperCAmelCase__, **UpperCAmelCase__ )
def _lowercase ( self : Any, *UpperCAmelCase__ : str, **UpperCAmelCase__ : Optional[Any] ):
return self.tokenizer.decode(*UpperCAmelCase__, **UpperCAmelCase__ )
@contextmanager
def _lowercase ( self : str ):
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your images inputs, or in a separate call." )
__lowercase = True
__lowercase = self.tokenizer
yield
__lowercase = self.image_processor
__lowercase = False
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Optional[Any]=False, UpperCAmelCase__ : List[Any]=None ):
if added_vocab is None:
__lowercase = self.tokenizer.get_added_vocab()
__lowercase = {}
while tokens:
__lowercase = re.search(r"<s_(.*?)>", UpperCAmelCase__, re.IGNORECASE )
if start_token is None:
break
__lowercase = start_token.group(1 )
__lowercase = re.search(rF"""</s_{key}>""", UpperCAmelCase__, re.IGNORECASE )
__lowercase = start_token.group()
if end_token is None:
__lowercase = tokens.replace(UpperCAmelCase__, "" )
else:
__lowercase = end_token.group()
__lowercase = re.escape(UpperCAmelCase__ )
__lowercase = re.escape(UpperCAmelCase__ )
__lowercase = re.search(F"""{start_token_escaped}(.*?){end_token_escaped}""", UpperCAmelCase__, re.IGNORECASE )
if content is not None:
__lowercase = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
__lowercase = self.tokenajson(UpperCAmelCase__, is_inner_value=UpperCAmelCase__, added_vocab=UpperCAmelCase__ )
if value:
if len(UpperCAmelCase__ ) == 1:
__lowercase = value[0]
__lowercase = value
else: # leaf nodes
__lowercase = []
for leaf in content.split(r"<sep/>" ):
__lowercase = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
__lowercase = leaf[1:-2] # for categorical special tokens
output[key].append(UpperCAmelCase__ )
if len(output[key] ) == 1:
__lowercase = output[key][0]
__lowercase = tokens[tokens.find(UpperCAmelCase__ ) + len(UpperCAmelCase__ ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:], is_inner_value=UpperCAmelCase__, added_vocab=UpperCAmelCase__ )
if len(UpperCAmelCase__ ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def _lowercase ( self : Union[str, Any] ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", UpperCAmelCase__, )
return self.image_processor_class
@property
def _lowercase ( self : List[str] ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", UpperCAmelCase__, )
return self.image_processor
| 17 |
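A quick illustration of the `token2json` parsing above. The tag string is invented for illustration, and `processor` stands for an already-constructed instance; passing `added_vocab` explicitly avoids touching the tokenizer:

sequence = "<s_menu><s_name>latte</s_name><s_price>3.50</s_price></s_menu>"
parsed = processor.token2json(sequence, added_vocab=[])
# parsed == {"menu": {"name": "latte", "price": "3.50"}}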
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : int=1_0_0, UpperCAmelCase__ : Any=1_3, UpperCAmelCase__ : List[Any]=3_0, UpperCAmelCase__ : Dict=2, UpperCAmelCase__ : Any=3, UpperCAmelCase__ : Optional[Any]=True, UpperCAmelCase__ : List[str]=True, UpperCAmelCase__ : Optional[Any]=3_2, UpperCAmelCase__ : Any=5, UpperCAmelCase__ : Any=4, UpperCAmelCase__ : Any=3_7, UpperCAmelCase__ : Optional[int]="gelu", UpperCAmelCase__ : Dict=0.1, UpperCAmelCase__ : Optional[int]=0.1, UpperCAmelCase__ : Dict=1_0, UpperCAmelCase__ : Tuple=0.02, UpperCAmelCase__ : List[Any]=3, ):
__lowercase = parent
__lowercase = vocab_size
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = type_sequence_label_size
__lowercase = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowercase = (image_size // patch_size) ** 2
__lowercase = num_patches + 1
def _lowercase ( self : int ):
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size], self.type_sequence_label_size )
__lowercase = BeitConfig(
vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=UpperCAmelCase__, initializer_range=self.initializer_range, )
return config, pixel_values, labels
def _lowercase ( self : Any, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : List[str], UpperCAmelCase__ : List[str] ):
__lowercase = FlaxBeitModel(config=UpperCAmelCase__ )
__lowercase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : int, UpperCAmelCase__ : List[Any] ):
__lowercase = FlaxBeitForMaskedImageModeling(config=UpperCAmelCase__ )
__lowercase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) )
def _lowercase ( self : Dict, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Any, UpperCAmelCase__ : Union[str, Any] ):
__lowercase = self.type_sequence_label_size
__lowercase = FlaxBeitForImageClassification(config=UpperCAmelCase__ )
__lowercase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowercase = 1
__lowercase = FlaxBeitForImageClassification(UpperCAmelCase__ )
__lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowercase = model(UpperCAmelCase__ )
def _lowercase ( self : List[str] ):
__lowercase = self.prepare_config_and_inputs()
(
(
__lowercase
) ,(
__lowercase
) ,(
__lowercase
) ,
) = config_and_inputs
__lowercase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class _lowerCAmelCase ( lowercase ,unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : str = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def _lowercase ( self : List[Any] ):
__lowercase = FlaxBeitModelTester(self )
__lowercase = ConfigTester(self, config_class=UpperCAmelCase__, has_text_modality=UpperCAmelCase__, hidden_size=3_7 )
def _lowercase ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def _lowercase ( self : Optional[int] ):
__lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(UpperCAmelCase__ )
__lowercase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["pixel_values"]
self.assertListEqual(arg_names[:1], UpperCAmelCase__ )
def _lowercase ( self : Tuple ):
__lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowercase = self._prepare_for_class(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = model_class(UpperCAmelCase__ )
@jax.jit
def model_jitted(UpperCAmelCase__ : str, **UpperCAmelCase__ : Dict ):
return model(pixel_values=UpperCAmelCase__, **UpperCAmelCase__ )
with self.subTest("JIT Enabled" ):
__lowercase = model_jitted(**UpperCAmelCase__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
__lowercase = model_jitted(**UpperCAmelCase__ ).to_tuple()
self.assertEqual(len(UpperCAmelCase__ ), len(UpperCAmelCase__ ) )
for jitted_output, output in zip(UpperCAmelCase__, UpperCAmelCase__ ):
self.assertEqual(jitted_output.shape, output.shape )
def _lowercase ( self : List[str] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def _lowercase ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ )
def _lowercase ( self : Tuple ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ )
@slow
def _lowercase ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
__lowercase = model_class_name.from_pretrained("microsoft/beit-base-patch16-224" )
__lowercase = model(np.ones((1, 3, 2_2_4, 2_2_4) ) )
self.assertIsNotNone(UpperCAmelCase__ )
def _A ( ) -> str:
'''simple docstring'''
__lowercase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_vision
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowercase ( self : Optional[int] ):
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def _lowercase ( self : Any ):
__lowercase = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=UpperCAmelCase__, return_tensors="np" ).pixel_values
# prepare bool_masked_pos
__lowercase = np.ones((1, 1_9_6), dtype=UpperCAmelCase__ )
# forward pass
__lowercase = model(pixel_values=UpperCAmelCase__, bool_masked_pos=UpperCAmelCase__ )
__lowercase = outputs.logits
# verify the logits
__lowercase = (1, 1_9_6, 8_1_9_2)
self.assertEqual(logits.shape, UpperCAmelCase__ )
__lowercase = np.array(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], UpperCAmelCase__, atol=1E-2 ) )
@slow
def _lowercase ( self : Any ):
__lowercase = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=UpperCAmelCase__, return_tensors="np" )
# forward pass
__lowercase = model(**UpperCAmelCase__ )
__lowercase = outputs.logits
# verify the logits
__lowercase = (1, 1_0_0_0)
self.assertEqual(logits.shape, UpperCAmelCase__ )
__lowercase = np.array([-1.2_385, -1.0_987, -1.0_108] )
self.assertTrue(np.allclose(logits[0, :3], UpperCAmelCase__, atol=1E-4 ) )
__lowercase = 2_8_1
self.assertEqual(logits.argmax(-1 ).item(), UpperCAmelCase__ )
@slow
def _lowercase ( self : List[str] ):
__lowercase = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=UpperCAmelCase__, return_tensors="np" )
# forward pass
__lowercase = model(**UpperCAmelCase__ )
__lowercase = outputs.logits
# verify the logits
__lowercase = (1, 2_1_8_4_1)
self.assertEqual(logits.shape, UpperCAmelCase__ )
__lowercase = np.array([1.6_881, -0.2_787, 0.5_901] )
self.assertTrue(np.allclose(logits[0, :3], UpperCAmelCase__, atol=1E-4 ) )
__lowercase = 2_3_9_6
self.assertEqual(logits.argmax(-1 ).item(), UpperCAmelCase__ )
| 17 | 1 |
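The JIT test above uses a pattern worth knowing on its own: compile a call with `jax.jit`, then re-run it under `jax.disable_jit()` and compare the outputs. A self-contained sketch of the same idea on a toy function:

import jax
import jax.numpy as jnp


@jax.jit
def forward(x):
    # any pure function of arrays can be jitted
    return jnp.tanh(x) * 2.0


x = jnp.ones((2, 3))
jitted_out = forward(x)
with jax.disable_jit():
    eager_out = forward(x)
assert jitted_out.shape == eager_out.shape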
"""Slow integration test for the VersatileDiffusion image-variation pipeline (adapted from diffusers)."""
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device


torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 220 |
def longest_common_substring(text1: str, text2: str) -> str:
    """
    Return the longest contiguous substring shared by text1 and text2,
    found with dynamic programming over common suffix lengths.

    >>> longest_common_substring("abcdef", "zabxy")
    'ab'
    """
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    text1_length = len(text1)
    text2_length = len(text2)

    # dp[i][j] = length of the common suffix of text1[:i] and text2[:j]
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 220 | 1 |
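Two quick checks that follow directly from the definition (illustrative only):

assert longest_common_substring("substring", "string") == "string"
assert longest_common_substring("", "abc") == ""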
"""Tokenization classes for PhoBERT (adapted from transformers)."""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class PhobertTokenizer(PreTrainedTokenizer):
    """PhoBERT tokenizer, based on Byte-Pair Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.merges_file = merges_file

        self.encoder = {}
        self.encoder[bos_token] = 0
        self.encoder[pad_token] = 1
        self.encoder[eos_token] = 2
        self.encoder[unk_token] = 3

        self.add_from_file(vocab_file)

        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # single sequence: <s> X </s> ; pair of sequences: <s> A </s></s> B </s>
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # PhoBERT does not use token type ids, so the mask is all zeros
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # repeatedly merge the lowest-ranked bigram until no known merge remains
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]  # drop the trailing "</w>" marker
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        """Load a fairseq-style vocabulary file ("<token> <count>" per line) into self.encoder."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
| 37 |
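A hedged usage sketch, assuming `transformers` is installed and the `vinai/phobert-base` checkpoint is reachable; the input sentence is illustrative (PhoBERT expects word-segmented Vietnamese):

from transformers import PhobertTokenizer

tokenizer = PhobertTokenizer.from_pretrained("vinai/phobert-base")
tokens = tokenizer.tokenize("Tôi là sinh_viên")        # BPE pieces, with "@@" marking continuations
ids = tokenizer.convert_tokens_to_ids(tokens)
text = tokenizer.convert_tokens_to_string(tokens)      # round-trips by stripping "@@ "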
"""Utilities to load Flax (.msgpack) checkpoints into PyTorch models (adapted from diffusers)."""
from pickle import UnpicklingError

import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict

from ..utils import logging


logger = logging.get_logger(__name__)


def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    """Deserialize a Flax checkpoint file and copy its weights into `pt_model`."""
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(pt_model, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)


def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Copy Flax checkpoint weights into a PyTorch model, transposing layouts as needed."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        # Flax conv kernels are HWIO, PyTorch expects OIHW; dense kernels need a plain transpose
        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
| 37 | 1 |
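A hedged sketch of calling the loader; `pt_model` stands for an already-instantiated `torch.nn.Module` whose architecture matches the checkpoint, and the `.msgpack` path is a placeholder:

pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "diffusion_flax_model.msgpack")
pt_model.eval()  # weights are now the Flax checkpoint's, transposed into PyTorch layout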
import unittest

from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
| 196 |
from __future__ import annotations


def two_pointer(nums: list[int], target: int) -> list[int]:
    """
    Given a list of integers sorted in ascending order, return the indices of two
    numbers that add up to `target`, or an empty list if no such pair exists.

    >>> two_pointer([2, 7, 11, 15], 9)
    [0, 1]
    """
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1

    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{two_pointer([2, 7, 11, 15], 9) = }")
| 196 | 1 |
"""
Auto-generated "dummy" objects in the diffusers style: when the `flax` backend is not
installed, each public Flax class resolves to a stub that raises a helpful error on any
use. The original dump repeated the identical stub once per Flax class, with all class
names obfuscated away; a single representative stub is kept below (the name
`FlaxDummyObject` is a placeholder, not an upstream class name), and the real file
repeats this pattern for every public Flax class.
"""
from ..utils import DummyObject, requires_backends


class FlaxDummyObject(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
| 47 |
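What these stubs do in practice, as a hedged sketch: `FlaxUNet2DConditionModel` is a real diffusers class, and when `flax` is absent the import resolves to a dummy like the one above, so construction fails with an install hint rather than an opaque ImportError:

from diffusers import FlaxUNet2DConditionModel  # resolves to a dummy when flax is missing

model = FlaxUNet2DConditionModel()  # raises, telling the user to install the flax backend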
"""simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase ( self : Optional[int] ):
lowerCAmelCase_ : int = inspect.getfile(accelerate.test_utils )
lowerCAmelCase_ : List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
lowerCAmelCase_ : Dict = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def lowerCamelCase ( self : Optional[int] ):
lowerCAmelCase_ : List[Any] = f'''
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
'''.split()
lowerCAmelCase_ : Dict = [sys.executable] + distributed_args
execute_subprocess_async(a_ , env=os.environ.copy() )
| 241 | 0 |
import unittest

from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script


@require_cpu
class CPUTester(unittest.TestCase):
    def test_cpu(self):
        # run the standard accelerate test script on CPU via the debug launcher
        debug_launcher(test_script.main)

    def test_ops_cpu(self):
        debug_launcher(test_ops.main)
| 359 |
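`debug_launcher` is also handy outside the test suite for smoke-testing distributed code locally on CPU; a hedged sketch (`training_loop` is a placeholder for any function built around `Accelerator()`):

from accelerate import debug_launcher


def training_loop():
    ...  # any function using accelerate's Accelerator


debug_launcher(training_loop, num_processes=2)  # spawns 2 CPU processes locally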
"""Lempel-Ziv-Welch (LZW) decompression over bit strings."""
import math
import sys


def read_file_binary(file_path: str) -> str:
    """Read a binary file and return its contents as a string of '0'/'1' bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Decompress a bit string produced by the matching LZW compressor."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        # when the code length grows past a power of two, re-key the lexicon with a leading "0"
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """Pack a bit string into bytes (with end-marker padding) and write it to a file."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            # write every byte, including the final padding byte
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Strip the length prefix written by the compressor (the bits up to the first '1')."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    """Despite its name (kept from the source), this reads a compressed file and writes the decompressed result."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
| 31 | 0 |