# Project Euler problem 551: a(1) = 1 and a(n+1) = a(n) + digitsum(a(n)).
# Compute a(10**15) by caching "jumps" over many terms, keyed on the digit sum
# of the high part of the current term.
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict = {}


def next_term(a_i, k, i, n):
    # a_i is the current term as a little-endian list of digits, viewed as
    # b * 10^k + c; ds_b is digitsum(b).
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))

    return (diff, dn)


def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    # add `addend` into the digit list starting at position k, carrying upward
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(f"{solution() = }")
# Tests for the Llama model (Hugging Face Transformers test suite).
import unittest

from parameterized import parameterized

from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer


class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))


@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-2, rtol=1e-2)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))

        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
# CLI tests for `accelerate launch`, `accelerate test`, and `accelerate tpu-config`.
import inspect
import os
import unittest
from pathlib import Path

import torch

import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command


class AccelerateLauncherTester(unittest.TestCase):
    """
    Test case for verifying the `accelerate launch` CLI operates correctly.
    """

    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])

    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"

    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())


class TpuConfigTester(unittest.TestCase):
    """
    Test case for verifying the `accelerate tpu-config` CLI passes the right command to `gcloud`.
    """

    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"

    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all',
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )
# UniSpeech model configuration (Hugging Face Transformers).
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
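
# --- Added usage sketch (not part of the original file) -----------------------
# With the default conv_stride of (5, 2, 2, 2, 2, 2, 2), inputs_to_logits_ratio
# is their product, 320: the feature encoder emits one logit frame per 320
# input samples.
#
#     config = UniSpeechConfig()
#     assert config.inputs_to_logits_ratio == 320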
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
# Projected adaptive log-softmax (as used by Transformer-XL in Hugging Face Transformers).
import torch
from torch import nn


class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)

            self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)

                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias

        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out

    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
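
# --- Added usage sketch (not part of the original file) -----------------------
# The cutoffs split the vocabulary into a frequent "head" and rarer tail
# clusters; `log_prob` still returns full-vocabulary log-probabilities.
# The sizes below are illustrative only.
#
#     softmax = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=64, d_proj=64, cutoffs=[100, 500])
#     hidden = torch.randn(8, 64)
#     log_probs = softmax.log_prob(hidden)  # shape (8, 1000)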
# Minimum-cost tickets: cheapest combination of 1-day, 7-day, and 30-day passes
# covering all travel days, solved with memoized recursion.
import functools


def mincost_tickets(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
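
# --- Added usage example (not part of the original file) ----------------------
# For travel days [1, 4, 6, 7, 8, 20] and pass costs [2, 7, 15]
# (1-day, 7-day, 30-day), the minimum total cost is 11: a 1-day pass for day 1,
# a 7-day pass covering days 4-8, and a 1-day pass for day 20.
#
#     print(mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # 11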
# Aggregates pytest log files and posts nightly test results to Slack.
import json
import os
from datetime import date
from pathlib import Path

from tabulate import DataRow, TableFormat, tabulate


hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)

failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]

total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f"{line['duration']:.4f}"
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ""
all_files2failed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            files2failed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in files2failed:
                    files2failed[data[0]] = [data[1:]]
                else:
                    files2failed[data[0]] += [data[1:]]
                failed_table.append(data)

            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(files2failed[file])])

            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_files2failed.append(files2failed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)

if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
    date_report = {
        "type": "context",
        "elements": [
            {
                "type": "plain_text",
                "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
            }
        ],
    }
    payload.append(date_report)
    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]
    for failed_file in all_files2failed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ""
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ""
            payload = {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                },
            }
            client.chat_postMessage(
                channel="#accelerate-ci-daily",
                thread_ts=ts,
                blocks=[payload],
            )
# Checks that diffusers imports cleanly and that every dummy-object backend is
# listed in the dependency versions table.
import inspect
import unittest


class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
# Tests for the digital image processing package.
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gaussian_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(
    file_path: str = "digital_image_processing/image_data/lena_small.jpg",
):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
# Lazy import structure for the Jukebox model (Hugging Face Transformers).
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0.999 , SCREAMING_SNAKE_CASE_="cosine" , ) -> Union[str, Any]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(SCREAMING_SNAKE_CASE_ ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(SCREAMING_SNAKE_CASE_ ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
lowerCAmelCase__ : Tuple = []
for i in range(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : List[Any] = i / num_diffusion_timesteps
lowerCAmelCase__ : str = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(SCREAMING_SNAKE_CASE_ ) / alpha_bar_fn(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) )
return torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )
class A__ ( __magic_name__ , __magic_name__ ):
lowercase = [e.name for e in KarrasDiffusionSchedulers]
lowercase = 2
@register_to_config
def __init__( self : Union[str, Any] , a : int = 1_000 , a : float = 0.0_0_0_8_5 , a : float = 0.0_1_2 , a : str = "linear" , a : Optional[Union[np.ndarray, List[float]]] = None , a : str = "epsilon" , a : Optional[bool] = False , a : Optional[bool] = False , a : float = 1.0 , a : str = "linspace" , a : int = 0 , ):
'''simple docstring'''
if trained_betas is not None:
lowerCAmelCase__ : List[str] = torch.tensor(a , dtype=torch.floataa )
elif beta_schedule == "linear":
lowerCAmelCase__ : List[str] = torch.linspace(a , a , a , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowerCAmelCase__ : Union[str, Any] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , a , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowerCAmelCase__ : int = betas_for_alpha_bar(a , alpha_transform_type='cosine' )
elif beta_schedule == "exp":
lowerCAmelCase__ : List[str] = betas_for_alpha_bar(a , alpha_transform_type='exp' )
else:
raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
lowerCAmelCase__ : int = 1.0 - self.betas
lowerCAmelCase__ : Tuple = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(a , a , a )
lowerCAmelCase__ : Optional[Any] = use_karras_sigmas
def _lowerCamelCase ( self : str , a : List[Any] , a : str=None ):
'''simple docstring'''
if schedule_timesteps is None:
lowerCAmelCase__ : List[str] = self.timesteps
lowerCAmelCase__ : int = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
lowerCAmelCase__ : List[str] = 1 if len(a ) > 1 else 0
else:
lowerCAmelCase__ : Tuple = timestep.cpu().item() if torch.is_tensor(a ) else timestep
lowerCAmelCase__ : Tuple = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _lowerCamelCase ( self : Tuple , a : torch.FloatTensor , a : Union[float, torch.FloatTensor] , ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.index_for_timestep(a )
lowerCAmelCase__ : Any = self.sigmas[step_index]
lowerCAmelCase__ : Optional[Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
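# Hedged sketch of the scaling above (illustrative, not part of the source):
# dividing by (sigma**2 + 1) ** 0.5 brings a sample whose noise level is
# sigma back to roughly unit variance before the denoising model sees it,
# e.g. torch.randn(1, 4, 8, 8) * (14.6**2 + 1) ** 0.5 is rescaled to ~N(0, 1).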
def _lowerCamelCase ( self : List[str] , a : int , a : Union[str, torch.device] = None , a : Optional[int] = None , ):
'''simple docstring'''
lowerCAmelCase__ : Any = num_inference_steps
lowerCAmelCase__ : Union[str, Any] = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
lowerCAmelCase__ : Union[str, Any] = np.linspace(0 , num_train_timesteps - 1 , a , dtype=a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
lowerCAmelCase__ : List[Any] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_steps is a power of 3
lowerCAmelCase__ : Dict = (np.arange(0 , a ) * step_ratio).round()[::-1].copy().astype(a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
lowerCAmelCase__ : Tuple = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_steps is a power of 3
lowerCAmelCase__ : int = (np.arange(a , 0 , -step_ratio )).round().copy().astype(a )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
lowerCAmelCase__ : str = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
lowerCAmelCase__ : List[Any] = np.log(a )
lowerCAmelCase__ : Optional[int] = np.interp(a , np.arange(0 , len(a ) ) , a )
if self.config.use_karras_sigmas:
lowerCAmelCase__ : str = self._convert_to_karras(in_sigmas=a , num_inference_steps=self.num_inference_steps )
lowerCAmelCase__ : Union[str, Any] = np.array([self._sigma_to_t(a , a ) for sigma in sigmas] )
lowerCAmelCase__ : Tuple = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
lowerCAmelCase__ : Dict = torch.from_numpy(a ).to(device=a )
lowerCAmelCase__ : Union[str, Any] = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
lowerCAmelCase__ : Tuple = torch.from_numpy(a )
lowerCAmelCase__ : List[str] = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(a ).startswith('mps' ):
# mps does not support float64
lowerCAmelCase__ : Optional[Any] = timesteps.to(a , dtype=torch.floataa )
else:
lowerCAmelCase__ : Any = timesteps.to(device=a )
# empty dt and derivative
lowerCAmelCase__ : str = None
lowerCAmelCase__ : Optional[int] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
lowerCAmelCase__ : Optional[Any] = defaultdict(a )
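# Hedged comparison of the three spacing modes above, assuming 1_000 training
# steps and 10 inference steps (illustrative numbers):
#   linspace: np.linspace(0, 999, 10)[::-1]          -> 999., 888., ..., 0.
#   leading:  (np.arange(0, 10) * 100).round()[::-1] -> 900, 800, ..., 0
#   trailing: np.arange(1_000, 0, -100).round() - 1  -> 999., 899., ..., 99.
# Note that "leading" never reaches the last training timestep and "trailing"
# never reaches timestep 0.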
def _lowerCamelCase ( self : Any , a : Dict , a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = np.log(a )
# get distribution
lowerCAmelCase__ : Tuple = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
lowerCAmelCase__ : Optional[int] = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
lowerCAmelCase__ : List[str] = low_idx + 1
lowerCAmelCase__ : List[str] = log_sigmas[low_idx]
lowerCAmelCase__ : Any = log_sigmas[high_idx]
# interpolate sigmas
lowerCAmelCase__ : Union[str, Any] = (low - log_sigma) / (low - high)
lowerCAmelCase__ : List[Any] = np.clip(a , 0 , 1 )
# transform interpolation to time range
lowerCAmelCase__ : List[Any] = (1 - w) * low_idx + w * high_idx
lowerCAmelCase__ : Any = t.reshape(sigma.shape )
return t
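# Hedged worked example of the interpolation above, assuming an ascending
# (hypothetical) sigma table [0.1, 1.0, 10.0, 80.0] and a query sigma = 5.0:
#   log(5.0) ~= 1.609 falls between log(1.0) = 0.0 and log(10.0) ~= 2.303,
#   so low_idx = 1, high_idx = 2, w = (0.0 - 1.609) / (0.0 - 2.303) ~= 0.70,
#   and t = (1 - w) * 1 + w * 2 ~= 1.70, i.e. a fractional training timestep.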
def _lowerCamelCase ( self : Tuple , a : torch.FloatTensor , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : float = in_sigmas[-1].item()
lowerCAmelCase__ : float = in_sigmas[0].item()
lowerCAmelCase__ : Tuple = 7.0 # 7.0 is the value used in the paper
lowerCAmelCase__ : Tuple = np.linspace(0 , 1 , a )
lowerCAmelCase__ : Any = sigma_min ** (1 / rho)
lowerCAmelCase__ : Optional[Any] = sigma_max ** (1 / rho)
lowerCAmelCase__ : Tuple = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
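# Hedged note on the ramp above: this is the rho-schedule of Karras et al.
# (2022). Interpolating linearly in sigma ** (1 / rho) space and raising the
# result back to the power rho concentrates steps near sigma_min; e.g. with
# sigma_min = 0.03, sigma_max = 14.6, rho = 7.0 the sigmas descend from 14.6
# to 0.03 with spacing that tightens toward the small-noise end.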
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return self.dt is None
def _lowerCamelCase ( self : List[str] , a : Union[torch.FloatTensor, np.ndarray] , a : Union[float, torch.FloatTensor] , a : Union[torch.FloatTensor, np.ndarray] , a : bool = True , ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.index_for_timestep(a )
# advance index counter by 1
lowerCAmelCase__ : Tuple = timestep.cpu().item() if torch.is_tensor(a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
lowerCAmelCase__ : Union[str, Any] = self.sigmas[step_index]
lowerCAmelCase__ : Union[str, Any] = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
lowerCAmelCase__ : int = self.sigmas[step_index - 1]
lowerCAmelCase__ : Any = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyway.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
lowerCAmelCase__ : Optional[int] = 0
lowerCAmelCase__ : Union[str, Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
lowerCAmelCase__ : int = sigma_hat if self.state_in_first_order else sigma_next
lowerCAmelCase__ : Any = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
lowerCAmelCase__ : Dict = sigma_hat if self.state_in_first_order else sigma_next
lowerCAmelCase__ : List[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
lowerCAmelCase__ : int = model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`''' )
if self.config.clip_sample:
lowerCAmelCase__ : str = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
lowerCAmelCase__ : Dict = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
lowerCAmelCase__ : Optional[int] = sigma_next - sigma_hat
# store for 2nd order step
lowerCAmelCase__ : List[Any] = derivative
lowerCAmelCase__ : str = dt
lowerCAmelCase__ : Dict = sample
else:
# 2. 2nd order / Heun's method
lowerCAmelCase__ : Union[str, Any] = (sample - pred_original_sample) / sigma_next
lowerCAmelCase__ : Union[str, Any] = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
lowerCAmelCase__ : Dict = self.dt
lowerCAmelCase__ : Optional[int] = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
lowerCAmelCase__ : List[str] = None
lowerCAmelCase__ : Tuple = None
lowerCAmelCase__ : str = None
lowerCAmelCase__ : Tuple = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=a )
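# Hedged sketch of the two-stage update that step() above alternates between
# (epsilon prediction, gamma = 0; `model` returns predicted noise):
#   x0      = x - sigma * model(x, sigma)                  # predicted original sample
#   d       = (x - x0) / sigma                             # ODE derivative at sigma
#   x_euler = x + d * (sigma_next - sigma)                 # 1st order step
#   x0_next = x_euler - sigma_next * model(x_euler, sigma_next)
#   d_next  = (x_euler - x0_next) / sigma_next
#   x_heun  = x + (d + d_next) / 2 * (sigma_next - sigma)  # 2nd order (Heun) correction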
def _lowerCamelCase ( self : int , a : torch.FloatTensor , a : torch.FloatTensor , a : torch.FloatTensor , ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(a ):
# mps does not support float64
lowerCAmelCase__ : Optional[int] = self.timesteps.to(original_samples.device , dtype=torch.floataa )
lowerCAmelCase__ : int = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
lowerCAmelCase__ : Union[str, Any] = self.timesteps.to(original_samples.device )
lowerCAmelCase__ : Optional[Any] = timesteps.to(original_samples.device )
lowerCAmelCase__ : List[Any] = [self.index_for_timestep(a , a ) for t in timesteps]
lowerCAmelCase__ : List[str] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
lowerCAmelCase__ : Any = sigma.unsqueeze(-1 )
lowerCAmelCase__ : List[str] = original_samples + noise * sigma
return noisy_samples
def __len__( self : int ):
'''simple docstring'''
return self.config.num_train_timesteps
| 307
| 0
|
import math
def lowerCAmelCase__ ( ) -> None:
lowerCAmelCase__ : Optional[Any] = input('Enter message: ' )
lowerCAmelCase__ : Dict = int(input(F'''Enter key [2-{len(SCREAMING_SNAKE_CASE_ ) - 1}]: ''' ) )
lowerCAmelCase__ : Optional[Any] = input('Encryption/Decryption [e/d]: ' )
if mode.lower().startswith('e' ):
lowerCAmelCase__ : Union[str, Any] = encrypt_message(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif mode.lower().startswith('d' ):
lowerCAmelCase__ : List[Any] = decrypt_message(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Append pipe symbol (vertical bar) to identify spaces at the end.
print(F'''Output:\n{text + "|"}''' )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
lowerCAmelCase__ : Optional[int] = [''] * key
for col in range(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : Optional[int] = col
while pointer < len(SCREAMING_SNAKE_CASE_ ):
cipher_text[col] += message[pointer]
pointer += key
return "".join(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
lowerCAmelCase__ : str = math.ceil(len(SCREAMING_SNAKE_CASE_ ) / key )
lowerCAmelCase__ : Optional[int] = key
lowerCAmelCase__ : List[str] = (num_cols * num_rows) - len(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Dict = [''] * num_cols
lowerCAmelCase__ : int = 0
lowerCAmelCase__ : Optional[Any] = 0
for symbol in message:
plain_text[col] += symbol
col += 1
if (
(col == num_cols)
or (col == num_cols - 1)
and (row >= num_rows - num_shaded_boxes)
):
lowerCAmelCase__ : List[Any] = 0
row += 1
return "".join(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 355
|
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
if isinstance(SCREAMING_SNAKE_CASE_ , collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class A__ :
def _lowerCamelCase ( self : List[Any] , a : List[str] , a : Optional[Any] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Dict , a : int , a : str , a : List[Any] , a : Dict , a : List[str]=None , **a : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(a , a )
lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel(a )
lowerCAmelCase__ : Tuple = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) )
def _lowerCamelCase ( self : Union[str, Any] , a : Dict , a : Tuple , a : Dict , a : Union[str, Any] , a : List[Any]=None , **a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.get_vision_text_model(a , a )
lowerCAmelCase__ : List[Any] = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : Optional[int] = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _lowerCamelCase ( self : List[str] , a : Optional[int] , a : Optional[int] , a : Union[str, Any] , a : List[Any] , a : Any=None , **a : Dict ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Optional[Any] = {'vision_model': vision_model, 'text_model': text_model}
lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**a )
lowerCAmelCase__ : Union[str, Any] = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _lowerCamelCase ( self : Any , a : Optional[int] , a : Optional[int] , a : Dict , a : Optional[int] , a : Optional[int]=None , **a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : int = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Dict = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : List[str] = model(input_ids=a , pixel_values=a , attention_mask=a )
lowerCAmelCase__ : Union[str, Any] = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a )
lowerCAmelCase__ : Any = TFVisionTextDualEncoderModel.from_pretrained(a )
lowerCAmelCase__ : int = model(input_ids=a , pixel_values=a , attention_mask=a )
lowerCAmelCase__ : Union[str, Any] = after_output[0].numpy()
lowerCAmelCase__ : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(a , 1E-5 )
def _lowerCamelCase ( self : List[str] , a : Dict , a : Optional[int] , a : List[Any] , a : str , a : int=None , **a : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Any = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : str = model(
input_ids=a , pixel_values=a , attention_mask=a , output_attentions=a )
lowerCAmelCase__ : Union[str, Any] = output.vision_model_output.attentions
self.assertEqual(len(a ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase__ : Optional[int] = to_atuple(vision_model.config.image_size )
lowerCAmelCase__ : Optional[Any] = to_atuple(vision_model.config.patch_size )
lowerCAmelCase__ : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowerCAmelCase__ : int = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowerCAmelCase__ : str = output.text_model_output.attentions
self.assertEqual(len(a ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
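# Hedged worked example of the patch arithmetic above (hypothetical sizes):
#   image_size = (224, 224), patch_size = (16, 16)
#   num_patches = (224 // 16) * (224 // 16) = 196
#   seq_len = 196 + 1 = 197, since ViT prepends a single [CLS] token.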
def _lowerCamelCase ( self : List[Any] , a : np.ndarray , a : np.ndarray , a : float ):
'''simple docstring'''
lowerCAmelCase__ : int = np.abs((a - b) ).max()
self.assertLessEqual(a , a , f'''Difference between torch and flax is {diff} (>= {tol}).''' )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : str = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
self.check_save_load(**a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**a )
@slow
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.get_pretrained_model_and_inputs()
lowerCAmelCase__ : List[Any] = model_a(**a )
lowerCAmelCase__ : Optional[int] = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(a )
lowerCAmelCase__ : str = TFVisionTextDualEncoderModel.from_pretrained(a )
lowerCAmelCase__ : List[str] = model_a(**a )
lowerCAmelCase__ : int = after_outputs[0].numpy()
lowerCAmelCase__ : List[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(a , 1E-5 )
@require_tf
class A__ ( __magic_name__ , unittest.TestCase ):
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-random-bert' )
lowerCAmelCase__ : int = 13
lowerCAmelCase__ : List[Any] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowerCAmelCase__ : int = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
lowerCAmelCase__ : Optional[Any] = random_attention_mask([batch_size, 4] )
lowerCAmelCase__ : List[Any] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _lowerCamelCase ( self : List[Any] , a : Dict , a : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = TFViTModel(a , name='vision_model' )
lowerCAmelCase__ : str = TFBertModel(a , name='text_model' )
return vision_model, text_model
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = TFViTModelTester(self )
lowerCAmelCase__ : Tuple = TFBertModelTester(self )
lowerCAmelCase__ : Optional[int] = vit_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : Union[str, Any] = bert_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : int = vision_config_and_inputs
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class A__ ( __magic_name__ , unittest.TestCase ):
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'Rocketknight1/tiny-random-deit-tf' , 'hf-internal-testing/tiny-random-roberta' )
lowerCAmelCase__ : Tuple = 13
lowerCAmelCase__ : Any = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowerCAmelCase__ : Dict = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
lowerCAmelCase__ : Any = random_attention_mask([batch_size, 4] )
lowerCAmelCase__ : Tuple = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _lowerCamelCase ( self : str , a : Optional[Any] , a : Dict , a : Dict , a : Any , a : Any=None , **a : int ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Optional[int] = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : Any = model(
input_ids=a , pixel_values=a , attention_mask=a , output_attentions=a )
lowerCAmelCase__ : Union[str, Any] = output.vision_model_output.attentions
self.assertEqual(len(a ) , vision_config.num_hidden_layers )
# in DeiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowerCAmelCase__ : str = to_atuple(vision_model.config.image_size )
lowerCAmelCase__ : Union[str, Any] = to_atuple(vision_model.config.patch_size )
lowerCAmelCase__ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowerCAmelCase__ : int = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowerCAmelCase__ : List[str] = output.text_model_output.attentions
self.assertEqual(len(a ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _lowerCamelCase ( self : int , a : Optional[int] , a : int ):
'''simple docstring'''
lowerCAmelCase__ : Dict = TFDeiTModel(a , name='vision_model' )
lowerCAmelCase__ : List[Any] = TFRobertaModel(a , name='text_model' )
return vision_model, text_model
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = TFDeiTModelTester(self )
lowerCAmelCase__ : List[str] = TFRobertaModelTester(self )
lowerCAmelCase__ : str = vit_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : List[Any] = bert_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = vision_config_and_inputs
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class A__ ( __magic_name__ , unittest.TestCase ):
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : int = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'Rocketknight1/tiny-random-clip-tf' , 'hf-internal-testing/tiny-random-bert' )
lowerCAmelCase__ : Dict = 13
lowerCAmelCase__ : str = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowerCAmelCase__ : List[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
lowerCAmelCase__ : Union[str, Any] = random_attention_mask([batch_size, 4] )
lowerCAmelCase__ : Optional[int] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _lowerCamelCase ( self : str , a : int , a : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = TFCLIPVisionModel(a , name='vision_model' )
lowerCAmelCase__ : List[str] = TFBertModel(a , name='text_model' )
return vision_model, text_model
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = TFCLIPVisionModelTester(self )
lowerCAmelCase__ : Union[str, Any] = TFBertModelTester(self )
lowerCAmelCase__ : Any = clip_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : Any = bert_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = vision_config_and_inputs
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class A__ ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel.from_pretrained(
'clip-italian/clip-italian' , logit_scale_init_value=1.0 , from_pt=a )
lowerCAmelCase__ : List[Any] = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' )
lowerCAmelCase__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowerCAmelCase__ : Any = processor(
text=['una foto di un gatto', 'una foto di un cane'] , images=a , padding=a , return_tensors='np' )
lowerCAmelCase__ : Union[str, Any] = model(**a )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
lowerCAmelCase__ : List[str] = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , a , atol=1E-3 ) )
| 307
| 0
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
lowerCamelCase__ = random.Random()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=1.0 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None ) -> Union[str, Any]:
if rng is None:
lowerCAmelCase__ : int = global_rng
lowerCAmelCase__ : Any = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class A__ ( unittest.TestCase ):
def __init__( self : Dict , a : Optional[Any] , a : Union[str, Any]=7 , a : Union[str, Any]=400 , a : Dict=2_000 , a : int=10 , a : Optional[Any]=160 , a : Any=8 , a : Optional[Any]=0.0 , a : Tuple=4_000 , a : str=False , a : Optional[Any]=True , ):
'''simple docstring'''
lowerCAmelCase__ : Any = parent
lowerCAmelCase__ : Any = batch_size
lowerCAmelCase__ : Optional[Any] = min_seq_length
lowerCAmelCase__ : List[Any] = max_seq_length
lowerCAmelCase__ : List[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCAmelCase__ : Dict = padding_value
lowerCAmelCase__ : Optional[int] = sampling_rate
lowerCAmelCase__ : Tuple = return_attention_mask
lowerCAmelCase__ : Union[str, Any] = do_normalize
lowerCAmelCase__ : Optional[Any] = feature_size
lowerCAmelCase__ : List[Any] = chunk_length
lowerCAmelCase__ : Optional[Any] = hop_length
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _lowerCamelCase ( self : List[str] , a : str=False , a : List[Any]=False ):
'''simple docstring'''
def _flatten(a : Tuple ):
return list(itertools.chain(*a ) )
if equal_length:
lowerCAmelCase__ : Optional[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowerCAmelCase__ : List[str] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCAmelCase__ : List[str] = [np.asarray(a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = WhisperFeatureExtractor if is_speech_available() else None
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = WhisperFeatureExtractionTester(self )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ : Tuple = feat_extract_first.save_pretrained(a )[0]
check_json_file_has_correct_format(a )
lowerCAmelCase__ : Optional[int] = self.feature_extraction_class.from_pretrained(a )
lowerCAmelCase__ : str = feat_extract_first.to_dict()
lowerCAmelCase__ : List[Any] = feat_extract_second.to_dict()
lowerCAmelCase__ : int = feat_extract_first.mel_filters
lowerCAmelCase__ : str = feat_extract_second.mel_filters
self.assertTrue(np.allclose(a , a ) )
self.assertEqual(a , a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ : Tuple = os.path.join(a , 'feat_extract.json' )
feat_extract_first.to_json_file(a )
lowerCAmelCase__ : Union[str, Any] = self.feature_extraction_class.from_json_file(a )
lowerCAmelCase__ : Any = feat_extract_first.to_dict()
lowerCAmelCase__ : Tuple = feat_extract_second.to_dict()
lowerCAmelCase__ : Optional[Any] = feat_extract_first.mel_filters
lowerCAmelCase__ : Optional[Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(a , a ) )
self.assertEqual(a , a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase__ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCAmelCase__ : int = [np.asarray(a ) for speech_input in speech_inputs]
# Test feature size
lowerCAmelCase__ : int = feature_extractor(a , padding='max_length' , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
lowerCAmelCase__ : List[Any] = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
lowerCAmelCase__ : int = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(a , a , atol=1E-3 ) )
# Test batched
lowerCAmelCase__ : Any = feature_extractor(a , return_tensors='np' ).input_features
lowerCAmelCase__ : Dict = feature_extractor(a , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(a , a ):
self.assertTrue(np.allclose(a , a , atol=1E-3 ) )
# Test that 2-D numpy arrays are batched.
lowerCAmelCase__ : Any = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowerCAmelCase__ : List[Any] = np.asarray(a )
lowerCAmelCase__ : Dict = feature_extractor(a , return_tensors='np' ).input_features
lowerCAmelCase__ : Any = feature_extractor(a , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(a , a ):
self.assertTrue(np.allclose(a , a , atol=1E-3 ) )
# Test truncation required
lowerCAmelCase__ : List[str] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
lowerCAmelCase__ : int = [np.asarray(a ) for speech_input in speech_inputs]
lowerCAmelCase__ : Any = [x[: feature_extractor.n_samples] for x in speech_inputs]
lowerCAmelCase__ : str = [np.asarray(a ) for speech_input in speech_inputs_truncated]
lowerCAmelCase__ : str = feature_extractor(a , return_tensors='np' ).input_features
lowerCAmelCase__ : Tuple = feature_extractor(a , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(a , a ):
self.assertTrue(np.allclose(a , a , atol=1E-3 ) )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
import torch
lowerCAmelCase__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase__ : List[Any] = np.random.rand(100 , 32 ).astype(np.floataa )
lowerCAmelCase__ : Union[str, Any] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCAmelCase__ : List[str] = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
lowerCAmelCase__ : Optional[Any] = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def _lowerCamelCase ( self : List[Any] , a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : str = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
lowerCAmelCase__ : Union[str, Any] = ds.sort('id' ).select(range(a ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
# fmt: off
lowerCAmelCase__ : Union[str, Any] = torch.tensor(
[
0.1_1_9_3, -0.0_9_4_6, -0.1_0_9_8, -0.0_1_9_6, 0.0_2_2_5, -0.0_6_9_0, -0.1_7_3_6, 0.0_9_5_1,
0.0_9_7_1, -0.0_8_1_7, -0.0_7_0_2, 0.0_1_6_2, 0.0_2_6_0, 0.0_0_1_7, -0.0_1_9_2, -0.1_6_7_8,
0.0_7_0_9, -0.1_8_6_7, -0.0_6_5_5, -0.0_2_7_4, -0.0_2_3_4, -0.1_8_8_4, -0.0_5_1_6, -0.0_5_5_4,
-0.0_2_7_4, -0.1_4_2_5, -0.1_4_2_3, 0.0_8_3_7, 0.0_3_7_7, -0.0_8_5_4
] )
# fmt: on
lowerCAmelCase__ : int = self._load_datasamples(1 )
lowerCAmelCase__ : str = WhisperFeatureExtractor()
lowerCAmelCase__ : Optional[Any] = feature_extractor(a , return_tensors='pt' ).input_features
self.assertEqual(input_features.shape , (1, 80, 3_000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , a , atol=1E-4 ) )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase__ : Union[str, Any] = self._load_datasamples(1 )[0]
lowerCAmelCase__ : Any = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to exercise normalization on 16-bit-range values
lowerCAmelCase__ : Optional[int] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=a )[0]
self.assertTrue(np.all(np.mean(a ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(a ) - 1 ) < 1E-3 ) )
| 356
|
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
lowerCamelCase__ = logging.getLogger(__name__)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
lowerCAmelCase__ : Dict = np.argmax(SCREAMING_SNAKE_CASE_ , axis=1 )
return np.sum(outputs == labels )
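# Hedged worked example of the accuracy helper above (hypothetical logits):
# np.argmax picks the predicted class per row, then matches are summed.
_example_logits = np.array([[0.2, 0.8], [0.9, 0.1], [0.3, 0.7]])
_example_labels = np.array([1, 0, 0])
assert np.sum(np.argmax(_example_logits, axis=1) == _example_labels) == 2  # two of three correct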
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
with open(SCREAMING_SNAKE_CASE_ , encoding='utf_8' ) as f:
lowerCAmelCase__ : Dict = csv.reader(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Dict = []
next(SCREAMING_SNAKE_CASE_ ) # skip the first line
for line in tqdm(SCREAMING_SNAKE_CASE_ ):
output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
lowerCAmelCase__ : Dict = []
for dataset in encoded_datasets:
lowerCAmelCase__ : List[str] = len(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
lowerCAmelCase__ : Tuple = np.zeros((n_batch, 2) , dtype=np.intaa )
lowerCAmelCase__ : List[Any] = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa )
lowerCAmelCase__ : Tuple = np.zeros((n_batch,) , dtype=np.intaa )
for i, (story, conta, conta, mc_label) in enumerate(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : Tuple = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
lowerCAmelCase__ : Any = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
lowerCAmelCase__ : Optional[Any] = with_conta
lowerCAmelCase__ : List[str] = with_conta
lowerCAmelCase__ : List[Any] = len(SCREAMING_SNAKE_CASE_ ) - 1
lowerCAmelCase__ : Tuple = len(SCREAMING_SNAKE_CASE_ ) - 1
lowerCAmelCase__ : Tuple = with_conta
lowerCAmelCase__ : Optional[int] = with_conta
lowerCAmelCase__ : Optional[int] = mc_label
lowerCAmelCase__ : Dict = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(SCREAMING_SNAKE_CASE_ ) for t in all_inputs ) )
return tensor_datasets
def lowerCAmelCase__ ( ) -> int:
lowerCAmelCase__ : int = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=SCREAMING_SNAKE_CASE_ , default='openai-gpt' , help='pretrained model name' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' )
parser.add_argument(
'--output_dir' , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument('--train_dataset' , type=SCREAMING_SNAKE_CASE_ , default='' )
parser.add_argument('--eval_dataset' , type=SCREAMING_SNAKE_CASE_ , default='' )
parser.add_argument('--seed' , type=SCREAMING_SNAKE_CASE_ , default=42 )
parser.add_argument('--num_train_epochs' , type=SCREAMING_SNAKE_CASE_ , default=3 )
parser.add_argument('--train_batch_size' , type=SCREAMING_SNAKE_CASE_ , default=8 )
parser.add_argument('--eval_batch_size' , type=SCREAMING_SNAKE_CASE_ , default=16 )
parser.add_argument('--adam_epsilon' , default=1e-8 , type=SCREAMING_SNAKE_CASE_ , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , type=SCREAMING_SNAKE_CASE_ , default=1 )
parser.add_argument(
'--max_steps' , default=-1 , type=SCREAMING_SNAKE_CASE_ , help=(
'If > 0: set total number of training steps to perform. Overrides num_train_epochs.'
) , )
parser.add_argument(
'--gradient_accumulation_steps' , type=SCREAMING_SNAKE_CASE_ , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--learning_rate' , type=SCREAMING_SNAKE_CASE_ , default=6.25e-5 )
parser.add_argument('--warmup_steps' , default=0 , type=SCREAMING_SNAKE_CASE_ , help='Linear warmup over warmup_steps.' )
parser.add_argument('--lr_schedule' , type=SCREAMING_SNAKE_CASE_ , default='warmup_linear' )
parser.add_argument('--weight_decay' , type=SCREAMING_SNAKE_CASE_ , default=0.01 )
parser.add_argument('--lm_coef' , type=SCREAMING_SNAKE_CASE_ , default=0.9 )
parser.add_argument('--n_valid' , type=SCREAMING_SNAKE_CASE_ , default=374 )
parser.add_argument('--server_ip' , type=SCREAMING_SNAKE_CASE_ , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=SCREAMING_SNAKE_CASE_ , default='' , help='Can be used for distant debugging.' )
lowerCAmelCase__ : List[str] = parser.parse_args()
print(SCREAMING_SNAKE_CASE_ )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE_ )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
lowerCAmelCase__ : str = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
lowerCAmelCase__ : Dict = torch.cuda.device_count()
logger.info('device: {}, n_gpu {}'.format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading function also adds new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
lowerCAmelCase__ : Union[str, Any] = ['_start_', '_delimiter_', '_classify_']
lowerCAmelCase__ : Dict = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE_ ) )
model.to(SCREAMING_SNAKE_CASE_ )
# Load and encode the datasets
def tokenize_and_encode(SCREAMING_SNAKE_CASE_ ):
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return obj
return [tokenize_and_encode(SCREAMING_SNAKE_CASE_ ) for o in obj]
logger.info('Encoding dataset...' )
lowerCAmelCase__ : List[Any] = load_rocstories_dataset(args.train_dataset )
lowerCAmelCase__ : str = load_rocstories_dataset(args.eval_dataset )
lowerCAmelCase__ : Union[str, Any] = (train_dataset, eval_dataset)
lowerCAmelCase__ : List[str] = tokenize_and_encode(SCREAMING_SNAKE_CASE_ )
# Compute the max input length for the Transformer
lowerCAmelCase__ : Union[str, Any] = model.config.n_positions // 2 - 2
lowerCAmelCase__ : Tuple = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
lowerCAmelCase__ : Dict = min(SCREAMING_SNAKE_CASE_ , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
lowerCAmelCase__ : int = pre_process_datasets(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = tensor_datasets[0], tensor_datasets[1]
lowerCAmelCase__ : str = TensorDataset(*SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Tuple = RandomSampler(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Tuple = DataLoader(SCREAMING_SNAKE_CASE_ , sampler=SCREAMING_SNAKE_CASE_ , batch_size=args.train_batch_size )
lowerCAmelCase__ : Optional[Any] = TensorDataset(*SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Dict = SequentialSampler(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = DataLoader(SCREAMING_SNAKE_CASE_ , sampler=SCREAMING_SNAKE_CASE_ , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
lowerCAmelCase__ : Union[str, Any] = args.max_steps
lowerCAmelCase__ : int = args.max_steps // (len(SCREAMING_SNAKE_CASE_ ) // args.gradient_accumulation_steps) + 1
else:
lowerCAmelCase__ : Optional[Any] = len(SCREAMING_SNAKE_CASE_ ) // args.gradient_accumulation_steps * args.num_train_epochs
lowerCAmelCase__ : Optional[int] = list(model.named_parameters() )
lowerCAmelCase__ : Tuple = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
lowerCAmelCase__ : str = [
{
'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'weight_decay': args.weight_decay,
},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
]
lowerCAmelCase__ : Union[str, Any] = AdamW(SCREAMING_SNAKE_CASE_ , lr=args.learning_rate , eps=args.adam_epsilon )
lowerCAmelCase__ : int = get_linear_schedule_with_warmup(
SCREAMING_SNAKE_CASE_ , num_warmup_steps=args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE_ )
if args.do_train:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ):
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : int = 0
lowerCAmelCase__ : str = tqdm(SCREAMING_SNAKE_CASE_ , desc='Training' )
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : Union[str, Any] = tuple(t.to(SCREAMING_SNAKE_CASE_ ) for t in batch )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = batch
lowerCAmelCase__ : Tuple = model(SCREAMING_SNAKE_CASE_ , mc_token_ids=SCREAMING_SNAKE_CASE_ , lm_labels=SCREAMING_SNAKE_CASE_ , mc_labels=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : str = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
lowerCAmelCase__ : Optional[int] = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
lowerCAmelCase__ : List[str] = 'Training loss: {:.2e} lr: {:.2e}'.format(SCREAMING_SNAKE_CASE_ , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
lowerCAmelCase__ : Optional[int] = model.module if hasattr(SCREAMING_SNAKE_CASE_ , 'module' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
lowerCAmelCase__ : List[str] = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : List[str] = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE_ )
torch.save(model_to_save.state_dict() , SCREAMING_SNAKE_CASE_ )
model_to_save.config.to_json_file(SCREAMING_SNAKE_CASE_ )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
lowerCAmelCase__ : Dict = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
lowerCAmelCase__ : List[Any] = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(SCREAMING_SNAKE_CASE_ )
if args.do_eval:
model.eval()
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = 0, 0
lowerCAmelCase__ , lowerCAmelCase__ : Any = 0, 0
for batch in tqdm(SCREAMING_SNAKE_CASE_ , desc='Evaluating' ):
lowerCAmelCase__ : str = tuple(t.to(SCREAMING_SNAKE_CASE_ ) for t in batch )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Tuple = batch
with torch.no_grad():
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = model(
SCREAMING_SNAKE_CASE_ , mc_token_ids=SCREAMING_SNAKE_CASE_ , lm_labels=SCREAMING_SNAKE_CASE_ , mc_labels=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = mc_logits.detach().cpu().numpy()
lowerCAmelCase__ : List[Any] = mc_labels.to('cpu' ).numpy()
lowerCAmelCase__ : str = accuracy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
lowerCAmelCase__ : Optional[int] = eval_loss / nb_eval_steps
lowerCAmelCase__ : Any = eval_accuracy / nb_eval_examples
lowerCAmelCase__ : Union[str, Any] = tr_loss / nb_tr_steps if args.do_train else None
lowerCAmelCase__ : Tuple = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
lowerCAmelCase__ : Dict = os.path.join(args.output_dir , 'eval_results.txt' )
with open(SCREAMING_SNAKE_CASE_ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , SCREAMING_SNAKE_CASE_ , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 307
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class A__ ( __magic_name__ ):
lowercase = 'roberta-prelayernorm'
def __init__( self : List[str] , a : List[str]=50_265 , a : Any=768 , a : Dict=12 , a : Dict=12 , a : List[str]=3_072 , a : Optional[int]="gelu" , a : Dict=0.1 , a : Dict=0.1 , a : str=512 , a : Optional[int]=2 , a : List[Any]=0.0_2 , a : List[str]=1E-12 , a : Optional[int]=1 , a : int=0 , a : Optional[int]=2 , a : int="absolute" , a : Any=True , a : int=None , **a : Optional[int] , ):
'''simple docstring'''
super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a )
lowerCAmelCase__ : Optional[int] = vocab_size
lowerCAmelCase__ : Any = hidden_size
lowerCAmelCase__ : Optional[Any] = num_hidden_layers
lowerCAmelCase__ : Optional[Any] = num_attention_heads
lowerCAmelCase__ : Optional[Any] = hidden_act
lowerCAmelCase__ : str = intermediate_size
lowerCAmelCase__ : List[str] = hidden_dropout_prob
lowerCAmelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase__ : Optional[Any] = max_position_embeddings
lowerCAmelCase__ : Optional[Any] = type_vocab_size
lowerCAmelCase__ : Dict = initializer_range
lowerCAmelCase__ : Optional[int] = layer_norm_eps
lowerCAmelCase__ : List[str] = position_embedding_type
lowerCAmelCase__ : Optional[Any] = use_cache
lowerCAmelCase__ : Optional[Any] = classifier_dropout
class A__ ( __magic_name__ ):
@property
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
if self.task == "multiple-choice":
lowerCAmelCase__ : List[str] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase__ : int = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 357
|
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order-specific notes:
# - tqdm must be checked before tokenizers
lowerCamelCase__ = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> int:
require_version(deps[pkg] , SCREAMING_SNAKE_CASE_ )
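# Hedged usage sketch of the wrapper above (the pin and hint are illustrative):
#   require_version("tokenizers>=0.11.1", "pip install -U tokenizers")
# raises an error that carries the hint when the installed version does not
# satisfy the pin, and returns silently otherwise.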
| 307
| 0
|
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
lowerCamelCase__ = logging.getLogger(__name__)
torch.set_grad_enabled(False)
lowerCamelCase__ = """cuda""" if torch.cuda.is_available() else """cpu"""
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=100 , SCREAMING_SNAKE_CASE_=" " ) -> List[str]:
lowerCAmelCase__ : Optional[Any] = text.split(SCREAMING_SNAKE_CASE_ )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )]
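# Hedged, self-contained restatement of the splitter above with readable names
# (defaults: 100-word chunks split on single spaces):
def _example_split_text(text, n=100, character=" "):
    words = text.split(character)
    return [character.join(words[i : i + n]).strip() for i in range(0, len(words), n)]

assert _example_split_text("one two three four five", n=2) == ["one two", "three four", "five"]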
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> dict:
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = [], []
for title, text in zip(documents['title'] , documents['text'] ):
if text is not None:
for passage in split_text(SCREAMING_SNAKE_CASE_ ):
titles.append(title if title is not None else '' )
texts.append(SCREAMING_SNAKE_CASE_ )
return {"title": titles, "text": texts}
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> dict:
lowerCAmelCase__ : Union[str, Any] = ctx_tokenizer(
documents['title'] , documents['text'] , truncation=SCREAMING_SNAKE_CASE_ , padding='longest' , return_tensors='pt' )['input_ids']
lowerCAmelCase__ : Union[str, Any] = ctx_encoder(input_ids.to(device=SCREAMING_SNAKE_CASE_ ) , return_dict=SCREAMING_SNAKE_CASE_ ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> Optional[int]:
######################################
logger.info('Step 1 - Create the dataset' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
lowerCAmelCase__ : Optional[int] = load_dataset(
'csv' , data_files=[rag_example_args.csv_path] , split='train' , delimiter='\t' , column_names=['title', 'text'] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
lowerCAmelCase__ : List[Any] = dataset.map(SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , num_proc=processing_args.num_proc )
# And compute the embeddings
lowerCAmelCase__ : Any = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
lowerCAmelCase__ : Union[str, Any] = Features(
{'text': Value('string' ), 'title': Value('string' ), 'embeddings': Sequence(Value('float32' ) )} ) # optional, save as float32 instead of float64 to save space
lowerCAmelCase__ : Any = dataset.map(
partial(SCREAMING_SNAKE_CASE_ , ctx_encoder=SCREAMING_SNAKE_CASE_ , ctx_tokenizer=SCREAMING_SNAKE_CASE_ ) , batched=SCREAMING_SNAKE_CASE_ , batch_size=processing_args.batch_size , features=SCREAMING_SNAKE_CASE_ , )
# And finally save your dataset
lowerCAmelCase__ : Optional[int] = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset' )
dataset.save_to_disk(SCREAMING_SNAKE_CASE_ )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('Step 2 - Index the dataset' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
lowerCAmelCase__ : Tuple = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
    dataset.add_faiss_index('embeddings' , custom_index=index )
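    # With the defaults defined below (d=768, m=128) this is a 768-dimensional
    # inner-product HNSW graph with up to 128 bi-directional links per node.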
# And save the index
lowerCAmelCase__ : str = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset_hnsw_index.faiss' )
    dataset.get_index('embeddings' ).save(index_path )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
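    # Sketch of consuming the saved artifacts with RAG (API names assumed from the
    # Transformers RAG example this script mirrors):
    # retriever = RagRetriever.from_pretrained(
    #     rag_example_args.rag_model_name, index_name="custom", indexed_dataset=dataset)
    # model = RagSequenceForGeneration.from_pretrained(rag_example_args.rag_model_name, retriever=retriever)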
@dataclass
class A__ :
lowercase = field(
        default=str(Path(__file__ ).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ) , metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''} , )
lowercase = field(
        default=None , metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'} , )
lowercase = field(
default='facebook/rag-sequence-nq' , metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''} , )
lowercase = field(
default='facebook/dpr-ctx_encoder-multiset-base' , metadata={
'help': (
'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'
' \'facebook/dpr-ctx_encoder-multiset-base\''
)
} , )
lowercase = field(
        default=str(Path(__file__ ).parent / 'test_run' / 'dummy-kb' ) , metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'} , )
@dataclass
class A__ :
lowercase = field(
        default=None , metadata={
'help': 'The number of processes to use to split the documents into passages. Default is single process.'
} , )
lowercase = field(
default=16 , metadata={
'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.'
} , )
@dataclass
class A__ :
lowercase = field(
default=768 , metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'} , )
lowercase = field(
default=128 , metadata={
'help': (
'The number of bi-directional links created for every new element during the HNSW index construction.'
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
lowerCamelCase__ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
lowerCamelCase__ = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 358
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class A__ ( __magic_name__ ):
lowercase = (DPMSolverSDEScheduler,)
lowercase = 10
def _lowerCamelCase ( self : Optional[int] , **a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = {
'num_train_timesteps': 1_100,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'noise_sampler_seed': 0,
}
config.update(**a )
return config
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def _lowerCamelCase ( self : int ):
        '''simple docstring'''
        for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def _lowerCamelCase ( self : Optional[int] ):
        '''simple docstring'''
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )
    def _lowerCamelCase ( self : List[str] ):
        '''simple docstring'''
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
        lowerCAmelCase__ : int = self.scheduler_classes[0]
        lowerCAmelCase__ : Tuple = self.get_scheduler_config()
        lowerCAmelCase__ : List[Any] = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        lowerCAmelCase__ : Dict = self.dummy_model()
        lowerCAmelCase__ : int = self.dummy_sample_deter * scheduler.init_noise_sigma
        lowerCAmelCase__ : int = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            lowerCAmelCase__ : List[Any] = scheduler.scale_model_input(sample , t )
            lowerCAmelCase__ : str = model(sample , t )
            lowerCAmelCase__ : int = scheduler.step(model_output , t , sample )
            lowerCAmelCase__ : Any = output.prev_sample
        lowerCAmelCase__ : List[Any] = torch.sum(torch.abs(sample ) )
        lowerCAmelCase__ : Optional[int] = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_6_7.4_7_8_2_1_0_4_4_9_2_1_8_7_5 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_7_8_7_0_5_9_6_4_5_6_5_2_7_7 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_1.5_9_3_5_2_1_1_1_8_1_6_4_0_6 ) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_6_8_9_2_2_9_9_6_5_2 ) < 1E-3
else:
assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
        lowerCAmelCase__ : Dict = self.scheduler_classes[0]
        lowerCAmelCase__ : List[str] = self.get_scheduler_config(prediction_type='v_prediction' )
        lowerCAmelCase__ : Any = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        lowerCAmelCase__ : Optional[int] = self.dummy_model()
        lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
        lowerCAmelCase__ : Any = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            lowerCAmelCase__ : str = scheduler.scale_model_input(sample , t )
            lowerCAmelCase__ : str = model(sample , t )
            lowerCAmelCase__ : Dict = scheduler.step(model_output , t , sample )
            lowerCAmelCase__ : Tuple = output.prev_sample
        lowerCAmelCase__ : int = torch.sum(torch.abs(sample ) )
        lowerCAmelCase__ : Union[str, Any] = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_2_4.7_7_1_4_9_2_0_0_4_3_9_4_5_3 ) < 1E-2
assert abs(result_mean.item() - 0.1_6_2_2_6_2_8_9_0_1_4_8_1_6_2_8_4 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_2_8.1_6_6_3_3_6_0_5_9_5_7_0_3 ) < 1E-2
assert abs(result_mean.item() - 0.1_6_6_8_8_3_2_6_0_0_1_1_6_7_2_9_7 ) < 1E-3
else:
assert abs(result_sum.item() - 1_1_9.8_4_8_7_5_4_8_8_2_8_1_2_5 ) < 1E-2
assert abs(result_mean.item() - 0.1_5_6_0_5_3_0_6_6_2_5_3_6_6_2_1 ) < 1E-3
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
        lowerCAmelCase__ : Optional[int] = self.scheduler_classes[0]
        lowerCAmelCase__ : Optional[int] = self.get_scheduler_config()
        lowerCAmelCase__ : int = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        lowerCAmelCase__ : Tuple = self.dummy_model()
        lowerCAmelCase__ : Any = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            lowerCAmelCase__ : Dict = scheduler.scale_model_input(sample , t )
            lowerCAmelCase__ : Optional[int] = model(sample , t )
            lowerCAmelCase__ : Tuple = scheduler.step(model_output , t , sample )
            lowerCAmelCase__ : Dict = output.prev_sample
        lowerCAmelCase__ : Union[str, Any] = torch.sum(torch.abs(sample ) )
        lowerCAmelCase__ : Dict = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_6_7.4_6_9_5_7_3_9_7_4_6_0_9_3_8 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_8_0_5_9_3_4_6_0_7_9_8_2_6_3_5 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_1.5_9_3_5_3_6_3_7_6_9_5_3_1_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_8_3_8_2_4_1_5_7_7_1 ) < 1E-3
else:
assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
        lowerCAmelCase__ : Tuple = self.scheduler_classes[0]
        lowerCAmelCase__ : Any = self.get_scheduler_config()
        lowerCAmelCase__ : Any = scheduler_class(**scheduler_config , use_karras_sigmas=True )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        lowerCAmelCase__ : str = self.dummy_model()
        lowerCAmelCase__ : Any = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        lowerCAmelCase__ : str = sample.to(torch_device )
        for t in scheduler.timesteps:
            lowerCAmelCase__ : Any = scheduler.scale_model_input(sample , t )
            lowerCAmelCase__ : int = model(sample , t )
            lowerCAmelCase__ : Union[str, Any] = scheduler.step(model_output , t , sample )
            lowerCAmelCase__ : Union[str, Any] = output.prev_sample
        lowerCAmelCase__ : Optional[int] = torch.sum(torch.abs(sample ) )
        lowerCAmelCase__ : Any = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_7_6.6_6_9_7_4_1_3_5_7_4_2_1_8_8 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_7.6_3_6_5_3_5_6_4_4_5_3_1_2_5 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
else:
assert abs(result_sum.item() - 1_7_0.3_1_3_5_2_2_3_3_8_8_6_7_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
| 307
| 0
|
import os
import string
import sys
lowerCamelCase__ = 1 << 8
lowerCamelCase__ = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
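# Arrow keys are stored shifted by ARROW_KEY_FLAG (1 << 8 == 256) so they can never
# collide with plain one-byte codes: "up" becomes 65 + 256 == 321 rather than ord("A") == 65.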
lowerCamelCase__ = KEYMAP["""up"""]
lowerCamelCase__ = KEYMAP["""left"""]
if sys.platform == "win32":
lowerCamelCase__ = []
lowerCamelCase__ = {
b"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
b"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def lowerCAmelCase__ ( ) -> Dict:
if os.name == "nt":
import msvcrt
lowerCAmelCase__ : Dict = 'mbcs'
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
        if len(WIN_CH_BUFFER ) == 0:
# Read the keystroke
lowerCAmelCase__ : Optional[Any] = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
lowerCAmelCase__ : Dict = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
lowerCAmelCase__ : Dict = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) )
                    WIN_CH_BUFFER.append(chx )
                    if ord(chx ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
lowerCAmelCase__ : Dict = chr(KEYMAP['esc'] )
except KeyError:
lowerCAmelCase__ : Dict = cha[1]
else:
                lowerCAmelCase__ : List[Any] = ch.decode('mbcs' )
else:
lowerCAmelCase__ : Tuple = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
        lowerCAmelCase__ : Tuple = sys.stdin.fileno()
        lowerCAmelCase__ : Any = termios.tcgetattr(fd )
        try:
            tty.setraw(fd )
            lowerCAmelCase__ : Optional[int] = sys.stdin.read(1 )
        finally:
            termios.tcsetattr(fd , termios.TCSADRAIN , old_settings )
return ch
def lowerCAmelCase__ ( ) -> Union[str, Any]:
    lowerCAmelCase__ : Any = get_raw_chars()
    if ord(char ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char ) == KEYMAP["esc"]:
        lowerCAmelCase__ : Union[str, Any] = get_raw_chars()
        if ord(combo ) == KEYMAP["mod_int"]:
            lowerCAmelCase__ : str = get_raw_chars()
            if ord(key ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 359
|
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {"""vocab_file""": """vocab.txt""", """emoji_file""": """emoji.json"""}
lowerCamelCase__ = {
"""vocab_file""": {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt""",
},
"""emoji_file""": {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json""",
},
}
lowerCamelCase__ = {
"""abeja/gpt-neox-japanese-2.7b""": 2048,
}
def lowerCAmelCase__ ( vocab_file , emoji_file ) -> List[Any]:
    with open(emoji_file , 'r' , encoding='utf-8' ) as f:
        lowerCAmelCase__ : List[str] = json.loads(f.read() )
    lowerCAmelCase__ : str = collections.OrderedDict()
    lowerCAmelCase__ : Tuple = collections.OrderedDict()
    lowerCAmelCase__ : str = collections.OrderedDict()
    with open(vocab_file , 'r' , encoding='utf-8' ) as f:
        lowerCAmelCase__ : Union[str, Any] = f.readlines()
    lowerCAmelCase__ : str = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[','.join(b )] = idx
        for wd in b:
            vocab[wd] = idx
return vocab, raw_vocab, ids_to_tokens, emoji
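# The vocab file holds one entry per line; a line with several comma-separated
# surface forms maps all of them to the same id, e.g. an illustrative line "あ,ア"
# gives vocab["あ"] == vocab["ア"].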
class A__ ( __magic_name__ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ['input_ids', 'attention_mask']
    def __init__( self : Optional[int] , vocab_file : Tuple , emoji_file : Union[str, Any] , unk_token : str="<|endoftext|>" , pad_token : Optional[Any]="<|endoftext|>" , bos_token : Dict="<|startoftext|>" , eos_token : int="<|endoftext|>" , do_clean_text : List[str]=False , **kwargs : List[str] , ):
        '''simple docstring'''
        super().__init__(
            unk_token=unk_token , pad_token=pad_token , bos_token=bos_token , eos_token=eos_token , do_clean_text=do_clean_text , **kwargs , )
        if not os.path.isfile(vocab_file ):
            raise ValueError(
                f'''Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'''
                ' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        if not os.path.isfile(emoji_file ):
            raise ValueError(
                f'''Can\'t find an emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'''
                ' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        lowerCAmelCase__ : Any = do_clean_text
        lowerCAmelCase__ : Union[str, Any] = load_vocab_and_emoji(vocab_file , emoji_file )
lowerCAmelCase__ : int = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
return len(self.raw_vocab )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
return dict(self.raw_vocab , **self.added_tokens_encoder )
def _lowerCamelCase ( self : Optional[Any] , a : Any ):
'''simple docstring'''
return self.subword_tokenizer.tokenize(a , clean=self.do_clean_text )
def _lowerCamelCase ( self : Tuple , a : Any ):
'''simple docstring'''
return self.vocab.get(a , self.vocab.get(self.unk_token ) )
def _lowerCamelCase ( self : Union[str, Any] , a : Optional[Any] ):
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(a )
def _lowerCamelCase ( self : Optional[int] , a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : str = ''.join(a ).strip()
return out_string
def _lowerCamelCase ( self : List[str] , a : "Conversation" ):
'''simple docstring'''
lowerCAmelCase__ : Any = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(a , add_special_tokens=a ) + [self.eos_token_id] )
if len(a ) > self.model_max_length:
lowerCAmelCase__ : str = input_ids[-self.model_max_length :]
return input_ids
def _lowerCamelCase ( self : Optional[Any] , a : str , a : Optional[str] = None ):
'''simple docstring'''
lowerCAmelCase__ : Any = 0
if os.path.isdir(a ):
lowerCAmelCase__ : List[str] = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase__ : List[Any] = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
else:
lowerCAmelCase__ : Any = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
)
lowerCAmelCase__ : List[str] = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
)
with open(a , 'w' , encoding='utf-8' ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
' Please check that the vocabulary is not corrupted!' )
lowerCAmelCase__ : List[str] = token_index
writer.write(','.join(a ) + '\n' )
index += 1
with open(a , 'w' , encoding='utf-8' ) as writer:
json.dump(self.emoji , a )
return vocab_file, emoji_file
class A__ ( __magic_name__ ):
def __init__( self : Tuple , a : str , a : str , a : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = vocab # same as swe
lowerCAmelCase__ : Optional[int] = ids_to_tokens # same as bpe
lowerCAmelCase__ : str = emoji
lowerCAmelCase__ : Any = np.max([len(a ) for w in self.vocab.keys()] )
lowerCAmelCase__ : Union[str, Any] = re.compile(R'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
lowerCAmelCase__ : int = re.compile(R'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
lowerCAmelCase__ : Any = re.compile(R'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
lowerCAmelCase__ : Dict = re.compile(
R'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
lowerCAmelCase__ : Union[str, Any] = re.compile(
R'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
lowerCAmelCase__ : Any = re.compile(
R'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
lowerCAmelCase__ : Optional[int] = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
lowerCAmelCase__ : Tuple = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
lowerCAmelCase__ : Tuple = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
def __len__( self : Optional[Any] ):
'''simple docstring'''
return len(self.ids_to_tokens )
    def _lowerCamelCase ( self : Any , content : List[Any] ):
        '''simple docstring'''
        lowerCAmelCase__ : Union[str, Any] = self.content_repatter1.sub('<URL>' , content )
        lowerCAmelCase__ : Optional[int] = self.content_repatter2.sub('<EMAIL>' , content )
        lowerCAmelCase__ : Tuple = self.content_repatter3.sub('<TEL>' , content )
        lowerCAmelCase__ : Union[str, Any] = self.content_repatter4.sub('<DATE>' , content )
        lowerCAmelCase__ : Union[str, Any] = self.content_repatter5.sub('<DATE>' , content )
        lowerCAmelCase__ : str = self.content_repatter6.sub('<PRICE>' , content )
        lowerCAmelCase__ : Dict = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
lowerCAmelCase__ : str = content.replace('<BLOCK><BLOCK>' , '<BLOCK>' )
return content
    def _lowerCamelCase ( self : Union[str, Any] , text : Dict , clean : Tuple=False ):
        '''simple docstring'''
        lowerCAmelCase__ : Optional[int] = text.replace(' ' , '<SP>' )
        lowerCAmelCase__ : Union[str, Any] = text.replace(' ' , '<SP>' )
        lowerCAmelCase__ : Union[str, Any] = text.replace('\r\n' , '<BR>' )
        lowerCAmelCase__ : Union[str, Any] = text.replace('\n' , '<BR>' )
        lowerCAmelCase__ : Optional[int] = text.replace('\r' , '<BR>' )
        lowerCAmelCase__ : Dict = text.replace('\t' , '<TAB>' )
        lowerCAmelCase__ : str = text.replace('—' , 'ー' )
        lowerCAmelCase__ : Optional[int] = text.replace('−' , 'ー' )
        for k, v in self.emoji["emoji"].items():
            if k in text:
                lowerCAmelCase__ : List[Any] = text.replace(k , v )
        if clean:
            lowerCAmelCase__ : List[str] = self.clean_text(text )
        def check_simbol(x : Dict ):
            lowerCAmelCase__ : List[Any] = x.encode()
            if len(x ) == 1 and len(e ) == 2:
                lowerCAmelCase__ : Optional[int] = (int(e[0] ) << 8) + int(e[1] )
                if (
                    (c >= 0Xc_2a1 and c <= 0Xc_2bf)
                    or (c >= 0Xc_780 and c <= 0Xc_783)
                    or (c >= 0Xc_ab9 and c <= 0Xc_bbf)
                    or (c >= 0Xc_c80 and c <= 0Xc_da2)
                ):
                    return True
            return False
        def checkuae(x : Optional[int] ):
            lowerCAmelCase__ : Optional[int] = x.encode()
            if len(x ) == 1 and len(e ) == 3:
                lowerCAmelCase__ : Optional[int] = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
                if c >= 0Xe28_080 and c <= 0Xe2b_07f:
                    return True
            return False
        lowerCAmelCase__ : Optional[Any] = 0
        lowerCAmelCase__ : Optional[int] = []
        while pos < len(text ):
            lowerCAmelCase__ : Optional[Any] = min(len(text ) , pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
            lowerCAmelCase__ : str = [] # (token_id, token, pos)
            for e in range(end , pos , -1 ):
                lowerCAmelCase__ : Union[str, Any] = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd ) > 2:
                        lowerCAmelCase__ : List[Any] = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(candidates ) > 0:
                # the smallest token_id is adopted
                lowerCAmelCase__ : Any = sorted(candidates , key=lambda x : x[0] )[0]
                result.append(wd )
                lowerCAmelCase__ : Union[str, Any] = e
            else:
                lowerCAmelCase__ : Dict = pos + 1
                lowerCAmelCase__ : List[Any] = text[pos:end]
                if check_simbol(wd ):
                    result.append('<KIGOU>' )
                elif checkuae(wd ):
                    result.append('<U2000U2BFF>' )
                else:
                    for i in wd.encode('utf-8' ):
                        result.append('<|byte%d|>' % i )
                lowerCAmelCase__ : Union[str, Any] = end
        return result
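    # Byte-fallback sketch: a character missing from the vocab is emitted as its
    # UTF-8 bytes, e.g. "𠮟" (bytes f0 a0 ae 9f) would become
    # <|byte240|><|byte160|><|byte174|><|byte159|> (illustrative, assuming it is OOV).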
def _lowerCamelCase ( self : int , a : List[Any] , a : Tuple="\n" ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = []
lowerCAmelCase__ : Tuple = []
lowerCAmelCase__ : List[str] = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(a ) > 0:
words.append(bytearray(a ).decode('utf-8' , errors='replace' ) )
lowerCAmelCase__ : Any = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['emoji_inv'][word] )
elif word == "<SP>":
words.append(' ' )
elif word == "<BR>":
words.append(a )
elif word == "<TAB>":
words.append('\t' )
elif word == "<BLOCK>":
words.append('▀' )
elif word == "<KIGOU>":
words.append('ǀ' )
elif word == "<U2000U2BFF>":
words.append('‖' )
else:
words.append(a )
if len(a ) > 0:
words.append(bytearray(a ).decode('utf-8' , errors='replace' ) )
lowerCAmelCase__ : Optional[Any] = ''.join(a )
return text
| 360
|
def lowerCAmelCase__ ( data ) -> str:
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )
def lowerCAmelCase__ ( data ) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data ) % 2) != 0:
        raise ValueError(
            'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set('0123456789ABCDEF' ):
        raise ValueError(
            'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data ) , 2 ) )
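# Illustrative round trip (hand-checked): base16-encoding b"Hello" yields
# "48656C6C6F", and decoding "48656C6C6F" yields b"Hello" again.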
if __name__ == "__main__":
import doctest
doctest.testmod()
| 307
| 0
|
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
lowerCamelCase__ = logging.getLogger(__name__)
class A__ ( __magic_name__ ):
lowercase = 'token-classification'
    def __init__( self : Optional[int] , hparams : List[str] ):
        '''simple docstring'''
        if type(hparams ) == dict:
            lowerCAmelCase__ : List[str] = Namespace(**hparams )
        lowerCAmelCase__ : int = import_module('tasks' )
        try:
            lowerCAmelCase__ : Any = getattr(module , hparams.task_type )
            lowerCAmelCase__ : TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
                f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
        lowerCAmelCase__ : List[str] = self.token_classification_task.get_labels(hparams.labels )
        lowerCAmelCase__ : Optional[int] = CrossEntropyLoss().ignore_index
        super().__init__(hparams , len(self.labels ) , self.mode )
def _lowerCamelCase ( self : Union[str, Any] , **a : Optional[int] ):
'''simple docstring'''
return self.model(**a )
    def _lowerCamelCase ( self : Any , batch : Any , batch_idx : int ):
        '''simple docstring'''
        lowerCAmelCase__ : int = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
        if self.config.model_type != "distilbert":
            inputs['token_type_ids'] = (
                batch[2] if self.config.model_type in ['bert', 'xlnet'] else None
            ) # XLM and RoBERTa don't use token_type_ids
        lowerCAmelCase__ : List[Any] = self(**inputs )
        lowerCAmelCase__ : Union[str, Any] = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.hparams
for mode in ["train", "dev", "test"]:
            lowerCAmelCase__ : int = self._feature_file(mode )
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                logger.info('Loading features from cached file %s' , cached_features_file )
                lowerCAmelCase__ : Any = torch.load(cached_features_file )
            else:
                logger.info('Creating features from dataset file at %s' , args.data_dir )
                lowerCAmelCase__ : str = self.token_classification_task.read_examples_from_file(args.data_dir , mode )
                lowerCAmelCase__ : str = self.token_classification_task.convert_examples_to_features(
                    examples , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['xlnet'] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['xlnet'] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=False , pad_on_left=bool(self.config.model_type in ['xlnet'] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
                logger.info('Saving features into cached file %s' , cached_features_file )
                torch.save(features , cached_features_file )
    def _lowerCamelCase ( self : str , mode : int , batch_size : int , shuffle : bool = False ):
        '''simple docstring'''
        lowerCAmelCase__ : Dict = self._feature_file(mode )
        logger.info('Loading features from cached file %s' , cached_features_file )
        lowerCAmelCase__ : str = torch.load(cached_features_file )
lowerCAmelCase__ : Optional[Any] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowerCAmelCase__ : Dict = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
lowerCAmelCase__ : List[str] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
lowerCAmelCase__ : Union[str, Any] = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
lowerCAmelCase__ : Dict = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
        return DataLoader(
            TensorDataset(all_input_ids , all_attention_mask , all_token_type_ids , all_label_ids ) , batch_size=batch_size )
    def _lowerCamelCase ( self : int , batch : Union[str, Any] , batch_idx : Optional[int] ):
        '''simple docstring'''
        """Compute validation"""
        lowerCAmelCase__ : Optional[int] = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
        if self.config.model_type != "distilbert":
            inputs['token_type_ids'] = (
                batch[2] if self.config.model_type in ['bert', 'xlnet'] else None
            ) # XLM and RoBERTa don't use token_type_ids
        lowerCAmelCase__ : Tuple = self(**inputs )
        lowerCAmelCase__ : Tuple = outputs[:2]
        lowerCAmelCase__ : Dict = logits.detach().cpu().numpy()
        lowerCAmelCase__ : List[str] = inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _lowerCamelCase ( self : List[Any] , a : str ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch.stack([x['val_loss'] for x in outputs] ).mean()
lowerCAmelCase__ : Optional[Any] = np.concatenate([x['pred'] for x in outputs] , axis=0 )
        lowerCAmelCase__ : Dict = np.argmax(preds , axis=2 )
lowerCAmelCase__ : str = np.concatenate([x['target'] for x in outputs] , axis=0 )
lowerCAmelCase__ : Optional[Any] = dict(enumerate(self.labels ) )
lowerCAmelCase__ : Tuple = [[] for _ in range(out_label_ids.shape[0] )]
lowerCAmelCase__ : List[Any] = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
        lowerCAmelCase__ : int = {
            'val_loss': val_loss_mean,
            'accuracy_score': accuracy_score(out_label_list , preds_list ),
            'precision': precision_score(out_label_list , preds_list ),
            'recall': recall_score(out_label_list , preds_list ),
            'f1': f1_score(out_label_list , preds_list ),
        }
lowerCAmelCase__ : Optional[Any] = dict(results.items() )
lowerCAmelCase__ : Optional[Any] = results
return ret, preds_list, out_label_list
def _lowerCamelCase ( self : Any , a : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = self._eval_end(a )
lowerCAmelCase__ : Any = ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _lowerCamelCase ( self : List[str] , a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = self._eval_end(a )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
lowerCAmelCase__ : int = ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
    def _lowerCamelCase ( parser : List[Any] , root_dir : Optional[Any] ):
        '''simple docstring'''
        BaseTransformer.add_model_specific_args(parser , root_dir )
        parser.add_argument(
            '--task_type' , default='NER' , type=str , help='Task type to fine tune in training (e.g. NER, POS, etc)' )
        parser.add_argument(
            '--max_seq_length' , default=128 , type=int , help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        parser.add_argument(
            '--labels' , default='' , type=str , help='Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.' , )
        parser.add_argument(
            '--gpus' , default=0 , type=int , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
        parser.add_argument(
            '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
        return parser
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
lowerCamelCase__ = NERTransformer.add_model_specific_args(parser, os.getcwd())
lowerCamelCase__ = parser.parse_args()
lowerCamelCase__ = NERTransformer(args)
lowerCamelCase__ = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
lowerCamelCase__ = sorted(glob.glob(os.path.join(args.output_dir, """checkpoint-epoch=*.ckpt"""), recursive=True))
lowerCamelCase__ = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 361
|
from __future__ import annotations
def lowerCAmelCase__ ( n , k ) -> list[list[int]]:
    lowerCAmelCase__ : list[list[int]] = []
    create_all_state(1 , n , k , [] , result )
return result
def lowerCAmelCase__ ( increment , total_number , level , current_list , total_list , ) -> None:
    if level == 0:
        total_list.append(current_list[:] )
        return
    for i in range(increment , total_number - level + 2 ):
        current_list.append(i )
        create_all_state(i + 1 , total_number , level - 1 , current_list , total_list )
current_list.pop()
def lowerCAmelCase__ ( total_list ) -> None:
    for i in total_list:
        print(*i )
if __name__ == "__main__":
lowerCamelCase__ = 4
lowerCamelCase__ = 2
lowerCamelCase__ = generate_all_combinations(n, k)
print_all_state(total_list)
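    # With n = 4 and k = 2 this prints the six 2-element combinations of 1..4,
    # one per line: 1 2, 1 3, 1 4, 2 3, 2 4 and 3 4.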
| 307
| 0
|
import re
def lowerCAmelCase__ ( phone ) -> bool:
    lowerCAmelCase__ : Any = re.compile(
        r'^(?:0|94|\+94|0{2}94)' r'7(0|1|2|4|5|6|7|8)' r'(-| |)' r'\d{7}$' )
    return bool(re.search(pattern , phone ) )
if __name__ == "__main__":
lowerCamelCase__ = """0094702343221"""
print(is_sri_lankan_phone_number(phone))
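    # Illustrative checks against the pattern above: "0094702343221", "+94713201350"
    # and "0770893245" match, while "0731234567" does not (3 is not an accepted
    # digit after the leading 7).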
| 362
|
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class A__ ( unittest.TestCase ):
@parameterized.expand([(None,), ('foo.json',)] )
    def _lowerCamelCase ( self : Dict , config_name : str ):
        '''simple docstring'''
        lowerCAmelCase__ : List[str] = GenerationConfig(
            do_sample=True , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir , config_name=config_name )
            lowerCAmelCase__ : Tuple = GenerationConfig.from_pretrained(tmp_dir , config_name=config_name )
            # Checks parameters that were specified
            self.assertEqual(loaded_config.do_sample , True )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
            self.assertEqual(loaded_config.max_time , None )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
        lowerCAmelCase__ : Dict = AutoConfig.from_pretrained('gpt2' )
        lowerCAmelCase__ : Any = GenerationConfig.from_model_config(model_config )
        lowerCAmelCase__ : Any = GenerationConfig()
        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model , default_generation_config )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = GenerationConfig()
lowerCAmelCase__ : Dict = {
'max_new_tokens': 1_024,
'foo': 'bar',
}
        lowerCAmelCase__ : List[Any] = copy.deepcopy(update_kwargs )
        lowerCAmelCase__ : Dict = generation_config.update(**update_kwargs )
        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs , update_kwargs_copy )
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens , 1_024 )
        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs , {'foo': 'bar'} )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
        lowerCAmelCase__ : Dict = GenerationConfig()
        generation_config.foo = 'bar'
        with tempfile.TemporaryDirectory('test-generation-config' ) as tmp_dir:
            generation_config.save_pretrained(tmp_dir )
            lowerCAmelCase__ : List[Any] = GenerationConfig.from_pretrained(tmp_dir )
            # update_kwargs was used to update the config on valid attributes
            self.assertEqual(new_config.foo , 'bar' )
            lowerCAmelCase__ : int = GenerationConfig.from_model_config(new_config )
            assert not hasattr(generation_config , 'foo' ) # no new kwargs should be initialized if from config
def _lowerCamelCase ( self : str ):
'''simple docstring'''
        lowerCAmelCase__ : Union[str, Any] = GenerationConfig()
        self.assertEqual(default_config.temperature , 1.0 )
        self.assertEqual(default_config.do_sample , False )
        self.assertEqual(default_config.num_beams , 1 )
        lowerCAmelCase__ : List[Any] = GenerationConfig(
            do_sample=True , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
        self.assertEqual(config.temperature , 0.7 )
        self.assertEqual(config.do_sample , True )
        self.assertEqual(config.num_beams , 1 )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir )
            lowerCAmelCase__ : Any = GenerationConfig.from_pretrained(tmp_dir , temperature=1.0 )
            self.assertEqual(loaded_config.temperature , 1.0 )
            self.assertEqual(loaded_config.do_sample , True )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class A__ ( unittest.TestCase ):
@classmethod
def _lowerCamelCase ( cls : int ):
'''simple docstring'''
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
def _lowerCamelCase ( cls : Optional[int] ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='test-generation-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-generation-config-org' )
except HTTPError:
pass
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
        lowerCAmelCase__ : Optional[int] = GenerationConfig(
            do_sample=True , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('test-generation-config' , use_auth_token=self._token )
lowerCAmelCase__ : Any = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-generation-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir , repo_id='test-generation-config' , push_to_hub=True , use_auth_token=self._token )
lowerCAmelCase__ : Tuple = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
        lowerCAmelCase__ : int = GenerationConfig(
            do_sample=True , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('valid_org/test-generation-config-org' , use_auth_token=self._token )
lowerCAmelCase__ : Dict = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-generation-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir , repo_id='valid_org/test-generation-config-org' , push_to_hub=True , use_auth_token=self._token )
lowerCAmelCase__ : List[str] = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
| 307
| 0
|
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""deepmind/language-perceiver""": """https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json""",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class A__ ( __magic_name__ ):
lowercase = 'perceiver'
    def __init__( self : List[Any] , num_latents : Dict=256 , d_latents : List[Any]=1_280 , d_model : Dict=768 , num_blocks : Union[str, Any]=1 , num_self_attends_per_block : Union[str, Any]=26 , num_self_attention_heads : Tuple=8 , num_cross_attention_heads : str=8 , qk_channels : List[str]=None , v_channels : str=None , cross_attention_shape_for_attention : List[Any]="kv" , self_attention_widening_factor : int=1 , cross_attention_widening_factor : Any=1 , hidden_act : List[Any]="gelu" , attention_probs_dropout_prob : Optional[Any]=0.1 , initializer_range : List[str]=0.0_2 , layer_norm_eps : List[str]=1E-12 , use_query_residual : List[str]=True , vocab_size : Optional[int]=262 , max_position_embeddings : Dict=2_048 , image_size : Optional[Any]=56 , train_size : Dict=[368, 496] , num_frames : List[str]=16 , audio_samples_per_frame : int=1_920 , samples_per_patch : Any=16 , output_shape : List[Any]=[1, 16, 224, 224] , **kwargs : List[str] , ):
        '''simple docstring'''
        super().__init__(**kwargs )
lowerCAmelCase__ : Dict = num_latents
lowerCAmelCase__ : List[str] = d_latents
lowerCAmelCase__ : Optional[int] = d_model
lowerCAmelCase__ : List[str] = num_blocks
lowerCAmelCase__ : Dict = num_self_attends_per_block
lowerCAmelCase__ : Any = num_self_attention_heads
lowerCAmelCase__ : Optional[Any] = num_cross_attention_heads
lowerCAmelCase__ : Optional[Any] = qk_channels
lowerCAmelCase__ : Optional[Any] = v_channels
lowerCAmelCase__ : int = cross_attention_shape_for_attention
lowerCAmelCase__ : int = self_attention_widening_factor
lowerCAmelCase__ : int = cross_attention_widening_factor
lowerCAmelCase__ : Tuple = hidden_act
lowerCAmelCase__ : int = attention_probs_dropout_prob
lowerCAmelCase__ : Union[str, Any] = initializer_range
lowerCAmelCase__ : Dict = layer_norm_eps
lowerCAmelCase__ : Optional[Any] = use_query_residual
# masked language modeling attributes
lowerCAmelCase__ : Tuple = vocab_size
lowerCAmelCase__ : Optional[int] = max_position_embeddings
# image classification attributes
lowerCAmelCase__ : Tuple = image_size
# flow attributes
lowerCAmelCase__ : Dict = train_size
# multimodal autoencoding attributes
lowerCAmelCase__ : Union[str, Any] = num_frames
lowerCAmelCase__ : Optional[Any] = audio_samples_per_frame
lowerCAmelCase__ : int = samples_per_patch
lowerCAmelCase__ : str = output_shape
class A__ ( __magic_name__ ):
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
if self.task == "multiple-choice":
lowerCAmelCase__ : Any = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase__ : List[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('inputs', dynamic_axis),
('attention_mask', dynamic_axis),
] )
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return 1E-4
def _lowerCamelCase ( self : Optional[Any] , a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , a : int = -1 , a : int = -1 , a : int = -1 , a : bool = False , a : Optional[TensorType] = None , a : int = 3 , a : int = 40 , a : int = 40 , ):
'''simple docstring'''
if isinstance(a , a ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCAmelCase__ : Optional[Any] = compute_effective_axis_dimension(
a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCAmelCase__ : List[str] = preprocessor.num_special_tokens_to_add(a )
lowerCAmelCase__ : List[str] = compute_effective_axis_dimension(
a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=a )
# Generate dummy inputs according to compute batch and sequence
lowerCAmelCase__ : Dict = [' '.join(['a'] ) * seq_length] * batch_size
lowerCAmelCase__ : int = dict(preprocessor(a , return_tensors=a ) )
lowerCAmelCase__ : Any = inputs.pop('input_ids' )
return inputs
elif isinstance(a , a ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCAmelCase__ : Any = compute_effective_axis_dimension(a , fixed_dimension=OnnxConfig.default_fixed_batch )
lowerCAmelCase__ : str = self._generate_dummy_images(a , a , a , a )
lowerCAmelCase__ : List[Any] = dict(preprocessor(images=a , return_tensors=a ) )
lowerCAmelCase__ : Any = inputs.pop('pixel_values' )
return inputs
else:
raise ValueError(
'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
| 363
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = UnCLIPImageVariationPipeline
lowercase = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
lowercase = IMAGE_VARIATION_BATCH_PARAMS
lowercase = [
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
lowercase = False
@property
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
return self.time_input_dim
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return 100
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(a )
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : List[Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(a )
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Union[str, Any] = {
'clip_embeddings_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'cross_attention_dim': self.cross_attention_dim,
}
lowerCAmelCase__ : Optional[Any] = UnCLIPTextProjModel(**a )
return model
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : str = {
'sample_size': 32,
# RGB in channels
'in_channels': 3,
# Out channels is double in channels because predicts mean and variance
'out_channels': 6,
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': 'identity',
}
lowerCAmelCase__ : str = UNetaDConditionModel(**a )
return model
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Any = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
torch.manual_seed(1 )
lowerCAmelCase__ : List[str] = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.dummy_decoder
lowerCAmelCase__ : Optional[int] = self.dummy_text_proj
lowerCAmelCase__ : Any = self.dummy_text_encoder
lowerCAmelCase__ : Any = self.dummy_tokenizer
lowerCAmelCase__ : Any = self.dummy_super_res_first
lowerCAmelCase__ : Optional[int] = self.dummy_super_res_last
lowerCAmelCase__ : Dict = UnCLIPScheduler(
variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
lowerCAmelCase__ : Any = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
lowerCAmelCase__ : Any = CLIPImageProcessor(crop_size=32 , size=32 )
lowerCAmelCase__ : Optional[int] = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
    def _lowerCamelCase ( self : Any , device : Dict , seed : List[str]=0 , pil_image : List[str]=True ):
        '''simple docstring'''
        lowerCAmelCase__ : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('mps' ):
            lowerCAmelCase__ : Optional[int] = torch.manual_seed(seed )
        else:
            lowerCAmelCase__ : str = torch.Generator(device=device ).manual_seed(seed )
        if pil_image:
            lowerCAmelCase__ : Optional[int] = input_image * 0.5 + 0.5
            lowerCAmelCase__ : Dict = input_image.clamp(0 , 1 )
            lowerCAmelCase__ : List[Any] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            lowerCAmelCase__ : Union[str, Any] = DiffusionPipeline.numpy_to_pil(input_image )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = 'cpu'
lowerCAmelCase__ : Any = self.get_dummy_components()
lowerCAmelCase__ : List[str] = self.pipeline_class(**a )
lowerCAmelCase__ : Dict = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Dict = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : str = pipe(**a )
lowerCAmelCase__ : Optional[Any] = output.images
lowerCAmelCase__ : str = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Optional[int] = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : List[str] = np.array(
[
0.9_9_9_7,
0.0_0_0_2,
0.9_9_9_7,
0.9_9_9_7,
0.9_9_6_9,
0.0_0_2_3,
0.9_9_9_7,
0.9_9_6_9,
0.9_9_7_0,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = 'cpu'
lowerCAmelCase__ : Dict = self.get_dummy_components()
lowerCAmelCase__ : Optional[int] = self.pipeline_class(**a )
lowerCAmelCase__ : int = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : List[str] = pipe(**a )
lowerCAmelCase__ : Union[str, Any] = output.images
lowerCAmelCase__ : int = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : int = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : str = np.array([0.9_9_9_7, 0.0_0_0_3, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_7_0, 0.0_0_2_4, 0.9_9_9_7, 0.9_9_7_1, 0.9_9_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = 'cpu'
lowerCAmelCase__ : int = self.get_dummy_components()
lowerCAmelCase__ : Tuple = self.pipeline_class(**a )
lowerCAmelCase__ : Union[str, Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Tuple = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : List[str] = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
lowerCAmelCase__ : Optional[int] = pipe(**a )
lowerCAmelCase__ : Tuple = output.images
lowerCAmelCase__ : List[str] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Union[str, Any] = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
lowerCAmelCase__ : str = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
lowerCAmelCase__ : Union[str, Any] = np.array(
[
0.9_9_9_7,
0.9_9_8_9,
0.0_0_0_8,
0.0_0_2_1,
0.9_9_6_0,
0.0_0_1_8,
0.0_0_1_4,
0.0_0_0_2,
0.9_9_3_3,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch.device('cpu' )
class DummyScheduler:
    init_noise_sigma = 1
lowerCAmelCase__ : Optional[Any] = self.get_dummy_components()
lowerCAmelCase__ : Dict = self.pipeline_class(**a )
lowerCAmelCase__ : Optional[Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Optional[int] = torch.Generator(device=a ).manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe.decoder.dtype
lowerCAmelCase__ : Union[str, Any] = 1
lowerCAmelCase__ : str = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
lowerCAmelCase__ : List[Any] = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
lowerCAmelCase__ : List[str] = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
lowerCAmelCase__ : Any = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
pipeline_inputs = self.get_dummy_inputs(a , pil_image=a )
img_out_1 = pipe(
**pipeline_inputs , decoder_latents=decoder_latents , super_res_latents=super_res_latents ).images
pipeline_inputs = self.get_dummy_inputs(a , pil_image=a )
# Don't pass the image, pass its embedding instead
image = pipeline_inputs.pop('image' )
image_embeddings = pipe.image_encoder(image ).image_embeds
img_out_2 = pipe(
**pipeline_inputs , decoder_latents=decoder_latents , super_res_latents=super_res_latents , image_embeddings=image_embeddings , ).images
# make sure passing the image embeddings manually gives an identical result
assert np.abs(img_out_1 - img_out_2 ).max() < 1E-4
@skip_mps
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch_device == 'cpu'
# Check is relaxed because there is no torch 2.0 sliced-attention AddedKV processor
lowerCAmelCase__ : int = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=a , expected_max_diff=a )
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = torch_device == 'cpu'
lowerCAmelCase__ : Any = True
lowerCAmelCase__ : Optional[Any] = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=a , relax_max_difference=a , additional_params_copy_to_batched_inputs=a , )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
lowerCAmelCase__ : List[str] = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=a , additional_params_copy_to_batched_inputs=a , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=a )
@skip_mps
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' )
lowerCAmelCase__ : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/unclip/karlo_v1_alpha_cat_variation_fp16.npy' )
lowerCAmelCase__ : Tuple = UnCLIPImageVariationPipeline.from_pretrained(
'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.float16 )
lowerCAmelCase__ : Union[str, Any] = pipeline.to(a )
pipeline.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Dict = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase__ : List[str] = pipeline(
a , generator=a , output_type='np' , )
lowerCAmelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(a , a , 15 )
| 307
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ = {
"""configuration_conditional_detr""": [
"""CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ConditionalDetrConfig""",
"""ConditionalDetrOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""ConditionalDetrFeatureExtractor"""]
lowerCamelCase__ = ["""ConditionalDetrImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConditionalDetrForObjectDetection""",
"""ConditionalDetrForSegmentation""",
"""ConditionalDetrModel""",
"""ConditionalDetrPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
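# A minimal sketch of the lazy-import pattern used above (a simplified,
# hypothetical stand-in for `_LazyModule`, not the real implementation, which
# also handles __dir__, module specs, and error reporting):
import importlib
import types


class LazyModule(types.ModuleType):
    """Defer importing heavy submodules until one of their names is accessed."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure
        # invert {submodule: [names]} into {name: submodule}
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        submodule = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        return getattr(submodule, attr)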
| 364
|
def stooge_sort(arr: list) -> list:
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the slice
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)
        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)
        # Sort the first 2/3 again to place the remaining elements
        stooge(arr, i, h - t)
if __name__ == "__main__":
user_input = input("Enter numbers separated by a comma:\n").strip()
unsorted = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
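# Hedged notes on stooge sort: the recurrence T(n) = 3*T(2n/3) + O(1) solves to
# O(n^(log 3 / log 1.5)) ≈ O(n^2.71), slower even than bubble sort, so the
# algorithm is a teaching curiosity. Example (illustrative values):
# >>> stooge_sort([18, 0, -7, -1, 2, 2])
# [-7, -1, 0, 2, 2, 18]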
| 307
| 0
|
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
lowerCamelCase__ = logging.get_logger(__name__)
# General docstring
lowerCamelCase__ = """MobileNetV1Config"""
# Base docstring
lowerCamelCase__ = """google/mobilenet_v1_1.0_224"""
lowerCamelCase__ = [1, 1024, 7, 7]
# Image classification docstring
lowerCamelCase__ = """google/mobilenet_v1_1.0_224"""
lowerCamelCase__ = """tabby, tabby cat"""
lowerCamelCase__ = [
"""google/mobilenet_v1_1.0_224""",
"""google/mobilenet_v1_0.75_192""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> Optional[int]:
lowerCAmelCase__ : Optional[Any] = {}
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : str = model.mobilenet_va
else:
lowerCAmelCase__ : str = model
lowerCAmelCase__ : List[Any] = 'MobilenetV1/Conv2d_0/'
lowerCAmelCase__ : Tuple = backbone.conv_stem.convolution.weight
lowerCAmelCase__ : int = backbone.conv_stem.normalization.bias
lowerCAmelCase__ : Tuple = backbone.conv_stem.normalization.weight
lowerCAmelCase__ : Union[str, Any] = backbone.conv_stem.normalization.running_mean
lowerCAmelCase__ : Any = backbone.conv_stem.normalization.running_var
for i in range(13 ):
lowerCAmelCase__ : List[Any] = i + 1
lowerCAmelCase__ : Dict = i * 2
lowerCAmelCase__ : str = backbone.layer[pt_index]
lowerCAmelCase__ : Dict = F'''MobilenetV1/Conv2d_{tf_index}_depthwise/'''
lowerCAmelCase__ : List[str] = pointer.convolution.weight
lowerCAmelCase__ : List[Any] = pointer.normalization.bias
lowerCAmelCase__ : List[str] = pointer.normalization.weight
lowerCAmelCase__ : int = pointer.normalization.running_mean
lowerCAmelCase__ : List[Any] = pointer.normalization.running_var
lowerCAmelCase__ : Any = backbone.layer[pt_index + 1]
lowerCAmelCase__ : Optional[Any] = F'''MobilenetV1/Conv2d_{tf_index}_pointwise/'''
lowerCAmelCase__ : Tuple = pointer.convolution.weight
lowerCAmelCase__ : Optional[int] = pointer.normalization.bias
lowerCAmelCase__ : str = pointer.normalization.weight
lowerCAmelCase__ : Union[str, Any] = pointer.normalization.running_mean
lowerCAmelCase__ : str = pointer.normalization.running_var
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : List[Any] = 'MobilenetV1/Logits/Conv2d_1c_1x1/'
lowerCAmelCase__ : Optional[int] = model.classifier.weight
lowerCAmelCase__ : int = model.classifier.bias
return tf_to_pt_map
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '
'https://www.tensorflow.org/install/ for installation instructions.' )
raise
# Load weights from TF model
lowerCAmelCase__ : List[str] = tf.train.list_variables(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = {}
for name, shape in init_vars:
logger.info(F'''Loading TF weight {name} with shape {shape}''' )
lowerCAmelCase__ : List[Any] = tf.train.load_variable(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = array
# Build TF to PyTorch weights loading map
lowerCAmelCase__ : Dict = _build_tf_to_pytorch_map(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for name, pointer in tf_to_pt_map.items():
logger.info(F'''Importing {name}''' )
if name not in tf_weights:
logger.info(F'''{name} not in tf pre-trained weights, skipping''' )
continue
lowerCAmelCase__ : Optional[Any] = tf_weights[name]
if "depthwise_weights" in name:
logger.info('Transposing depthwise' )
lowerCAmelCase__ : Optional[int] = np.transpose(SCREAMING_SNAKE_CASE_ , (2, 3, 0, 1) )
elif "weights" in name:
logger.info('Transposing' )
if len(pointer.shape ) == 2: # copying into linear layer
lowerCAmelCase__ : Optional[int] = array.squeeze().transpose()
else:
lowerCAmelCase__ : Optional[int] = np.transpose(SCREAMING_SNAKE_CASE_ , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' )
logger.info(F'''Initialize PyTorch weight {name} {array.shape}''' )
lowerCAmelCase__ : Optional[int] = torch.from_numpy(SCREAMING_SNAKE_CASE_ )
tf_weights.pop(name , None )
tf_weights.pop(name + '/RMSProp' , None )
tf_weights.pop(name + '/RMSProp_1' , None )
tf_weights.pop(name + '/ExponentialMovingAverage' , None )
logger.info(F'''Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}''' )
return model
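# Shape intuition for the transposes in the loader above (assumed TF checkpoint
# layout): standard conv kernels are stored as (H, W, in, out) in TF and
# (out, in, H, W) in PyTorch, hence np.transpose(..., (3, 2, 0, 1)); depthwise
# kernels are (H, W, in, multiplier) in TF, and with a channel multiplier of 1
# the (2, 3, 0, 1) transpose yields (in, 1, H, W), PyTorch's depthwise Conv2d
# weight layout.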
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> torch.Tensor:
lowerCAmelCase__ : Union[str, Any] = features.shape[-2:]
lowerCAmelCase__ : int = conv_layer.stride
lowerCAmelCase__ : int = conv_layer.kernel_size
if in_height % stride_height == 0:
lowerCAmelCase__ : str = max(kernel_height - stride_height , 0 )
else:
lowerCAmelCase__ : Any = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
lowerCAmelCase__ : Any = max(kernel_width - stride_width , 0 )
else:
lowerCAmelCase__ : int = max(kernel_width - (in_width % stride_width) , 0 )
lowerCAmelCase__ : Tuple = pad_along_width // 2
lowerCAmelCase__ : List[Any] = pad_along_width - pad_left
lowerCAmelCase__ : Any = pad_along_height // 2
lowerCAmelCase__ : Dict = pad_along_height - pad_top
lowerCAmelCase__ : Optional[Any] = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 'constant' , 0.0 )
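# Worked example of the TF "SAME" padding computed above (assumed sizes): with
# in_height = 7, stride_height = 2 and kernel_height = 3, 7 % 2 != 0, so
# pad_along_height = max(3 - (7 % 2), 0) = 2, split into pad_top = 1 and
# pad_bottom = 1; this reproduces TensorFlow's output size ceil(7 / 2) = 4.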
class A__ ( nn.Module ):
def __init__( self : int , a : MobileNetVaConfig , a : int , a : int , a : int , a : Optional[int] = 1 , a : Optional[int] = 1 , a : bool = False , a : Optional[bool] = True , a : Optional[bool or str] = True , ):
super().__init__()
lowerCAmelCase__ : int = config
if in_channels % groups != 0:
raise ValueError(f'''Input channels ({in_channels}) are not divisible by {groups} groups.''' )
if out_channels % groups != 0:
raise ValueError(f'''Output channels ({out_channels}) are not divisible by {groups} groups.''' )
lowerCAmelCase__ : int = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
lowerCAmelCase__ : Tuple = nn.Convad(
in_channels=a , out_channels=a , kernel_size=a , stride=a , padding=a , groups=a , bias=a , padding_mode='zeros' , )
if use_normalization:
lowerCAmelCase__ : Tuple = nn.BatchNormad(
num_features=a , eps=config.layer_norm_eps , momentum=0.9_9_9_7 , affine=a , track_running_stats=a , )
else:
lowerCAmelCase__ : Dict = None
if use_activation:
if isinstance(a , a ):
lowerCAmelCase__ : str = ACTaFN[use_activation]
elif isinstance(config.hidden_act , a ):
lowerCAmelCase__ : Any = ACTaFN[config.hidden_act]
else:
lowerCAmelCase__ : List[str] = config.hidden_act
else:
lowerCAmelCase__ : List[Any] = None
def _lowerCamelCase ( self : str , a : torch.Tensor ):
if self.config.tf_padding:
lowerCAmelCase__ : int = apply_tf_padding(a , self.convolution )
lowerCAmelCase__ : int = self.convolution(a )
if self.normalization is not None:
lowerCAmelCase__ : Union[str, Any] = self.normalization(a )
if self.activation is not None:
lowerCAmelCase__ : int = self.activation(a )
return features
class A__ ( __magic_name__ ):
lowercase = MobileNetVaConfig
lowercase = load_tf_weights_in_mobilenet_va
lowercase = 'mobilenet_v1'
lowercase = 'pixel_values'
lowercase = False
def _lowerCamelCase ( self : Any , a : Union[nn.Linear, nn.Convad] ):
if isinstance(a , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(a , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
lowerCamelCase__ = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
lowerCamelCase__ = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.' , __magic_name__ , )
class A__ ( __magic_name__ ):
def __init__( self : Any , a : MobileNetVaConfig , a : bool = True ):
super().__init__(a )
lowerCAmelCase__ : int = config
lowerCAmelCase__ : str = 32
lowerCAmelCase__ : Dict = max(int(depth * config.depth_multiplier ) , config.min_depth )
lowerCAmelCase__ : List[Any] = MobileNetVaConvLayer(
a , in_channels=config.num_channels , out_channels=a , kernel_size=3 , stride=2 , )
lowerCAmelCase__ : Any = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
lowerCAmelCase__ : Optional[int] = nn.ModuleList()
for i in range(13 ):
lowerCAmelCase__ : int = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
lowerCAmelCase__ : List[str] = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
a , in_channels=a , out_channels=a , kernel_size=3 , stride=strides[i] , groups=a , ) )
self.layer.append(
MobileNetVaConvLayer(
a , in_channels=a , out_channels=a , kernel_size=1 , ) )
lowerCAmelCase__ : str = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def _lowerCamelCase ( self : Tuple , a : Optional[int] ):
raise NotImplementedError
@add_start_docstrings_to_model_forward(a )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=a , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _lowerCamelCase ( self : str , a : Optional[torch.Tensor] = None , a : Optional[bool] = None , a : Optional[bool] = None , ):
lowerCAmelCase__ : Optional[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ : int = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
lowerCAmelCase__ : Dict = self.conv_stem(a )
lowerCAmelCase__ : Dict = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
lowerCAmelCase__ : List[str] = layer_module(a )
if output_hidden_states:
lowerCAmelCase__ : List[str] = all_hidden_states + (hidden_states,)
lowerCAmelCase__ : List[str] = hidden_states
if self.pooler is not None:
lowerCAmelCase__ : Union[str, Any] = torch.flatten(self.pooler(a ) , start_dim=1 )
else:
lowerCAmelCase__ : Union[str, Any] = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=a , pooler_output=a , hidden_states=a , )
@add_start_docstrings(
'\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , __magic_name__ , )
class A__ ( __magic_name__ ):
def __init__( self : Dict , a : MobileNetVaConfig ):
super().__init__(a )
lowerCAmelCase__ : Any = config.num_labels
lowerCAmelCase__ : Dict = MobileNetVaModel(a )
lowerCAmelCase__ : Tuple = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
lowerCAmelCase__ : str = nn.Dropout(config.classifier_dropout_prob , inplace=a )
lowerCAmelCase__ : Optional[Any] = nn.Linear(a , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(a )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _lowerCamelCase ( self : int , a : Optional[torch.Tensor] = None , a : Optional[bool] = None , a : Optional[torch.Tensor] = None , a : Optional[bool] = None , ):
lowerCAmelCase__ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ : Optional[Any] = self.mobilenet_va(a , output_hidden_states=a , return_dict=a )
lowerCAmelCase__ : Union[str, Any] = outputs.pooler_output if return_dict else outputs[1]
lowerCAmelCase__ : Tuple = self.classifier(self.dropout(a ) )
lowerCAmelCase__ : str = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowerCAmelCase__ : Tuple = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowerCAmelCase__ : str = 'single_label_classification'
else:
lowerCAmelCase__ : Union[str, Any] = 'multi_label_classification'
if self.config.problem_type == "regression":
lowerCAmelCase__ : Optional[int] = MSELoss()
if self.num_labels == 1:
lowerCAmelCase__ : List[str] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowerCAmelCase__ : List[Any] = loss_fct(a , a )
elif self.config.problem_type == "single_label_classification":
lowerCAmelCase__ : Any = CrossEntropyLoss()
lowerCAmelCase__ : Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowerCAmelCase__ : Optional[Any] = BCEWithLogitsLoss()
lowerCAmelCase__ : Any = loss_fct(a , a )
if not return_dict:
lowerCAmelCase__ : int = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=a , logits=a , hidden_states=outputs.hidden_states , )
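# Hedged usage sketch for the image-classification model above; the checkpoint
# name is taken from the docstring constants in this file, and `image` stands
# for any PIL image you supply:
#
#     from transformers import AutoImageProcessor, MobileNetV1ForImageClassification
#     processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#     model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#     inputs = processor(images=image, return_tensors="pt")
#     predicted_class = model(**inputs).logits.argmax(-1).item()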
| 365
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase__ = {
"""configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
"""processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""Speech2TextTokenizer"""]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""Speech2TextFeatureExtractor"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSpeech2TextForConditionalGeneration""",
"""TFSpeech2TextModel""",
"""TFSpeech2TextPreTrainedModel""",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Speech2TextForConditionalGeneration""",
"""Speech2TextModel""",
"""Speech2TextPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 307
| 0
|
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(__magic_name__ )
class A__ ( __magic_name__ ):
def __init__( self : List[Any] , **a : Any ):
'''simple docstring'''
super().__init__(**a )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Optional[Any] , a : Union[str, List[str], "Image", List["Image"]] , **a : Optional[int] ):
'''simple docstring'''
return super().__call__(a , **a )
def _lowerCamelCase ( self : Union[str, Any] , **a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = {}
if "candidate_labels" in kwargs:
lowerCAmelCase__ : Dict = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
lowerCAmelCase__ : Optional[Any] = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def _lowerCamelCase ( self : Dict , a : Union[str, Any] , a : Any=None , a : Union[str, Any]="This is a photo of {}." ):
'''simple docstring'''
lowerCAmelCase__ : Any = load_image(a )
lowerCAmelCase__ : Dict = self.image_processor(images=[image] , return_tensors=self.framework )
lowerCAmelCase__ : Optional[Any] = candidate_labels
lowerCAmelCase__ : int = [hypothesis_template.format(x ) for x in candidate_labels]
lowerCAmelCase__ : List[str] = self.tokenizer(a , return_tensors=self.framework , padding=a )
lowerCAmelCase__ : Tuple = [text_inputs]
return inputs
def _lowerCamelCase ( self : Tuple , a : int ):
'''simple docstring'''
candidate_labels = model_inputs.pop('candidate_labels' )
text_inputs = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , UserDict ):
    text_inputs = text_inputs[0]
else:
    # Batching case.
    text_inputs = text_inputs[0][0]
outputs = self.model(**model_inputs , **text_inputs )
lowerCAmelCase__ : int = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_image,
}
return model_outputs
def _lowerCamelCase ( self : int , a : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Any = model_outputs.pop('candidate_labels' )
lowerCAmelCase__ : List[Any] = model_outputs['logits'][0]
if self.framework == "pt":
lowerCAmelCase__ : Union[str, Any] = logits.softmax(dim=-1 ).squeeze(-1 )
lowerCAmelCase__ : Tuple = probs.tolist()
if not isinstance(a , a ):
lowerCAmelCase__ : int = [scores]
elif self.framework == "tf":
lowerCAmelCase__ : Tuple = stable_softmax(a , axis=-1 )
lowerCAmelCase__ : List[Any] = probs.numpy().tolist()
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
result = [
    {'score': score, 'label': candidate_label}
    for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
]
return result
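# Hedged usage sketch for the zero-shot pipeline above; the checkpoint name is
# an assumption (any CLIP-style model with a zero-shot head should work), and
# the scores shown are illustrative:
#
#     from transformers import pipeline
#     classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#     classifier("cat.png", candidate_labels=["cat", "dog", "car"])
#     # -> [{"score": 0.98, "label": "cat"}, {"score": 0.01, "label": "dog"}, ...]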
| 366
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = {
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
}
}
lowerCAmelCase__ : int = {
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 128,
'task_specific_params.summarization.min_length': 12,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 142,
'task_specific_params.summarization_cnn.min_length': 56,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 62,
'task_specific_params.summarization_xsum.min_length': 11,
'task_specific_params.summarization_xsum.num_beams': 6,
}
self.assertEqual(flatten_dict(a ) , a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(a ) , x.transpose() ) )
lowerCAmelCase__ : List[str] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 )
lowerCAmelCase__ : List[Any] = torch.tensor(a )
self.assertTrue(np.allclose(transpose(a ) , transpose(a ).numpy() ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : int = torch.tensor(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , transpose(a , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(transpose(a ) , transpose(a ).numpy() ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : Dict = tf.constant(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , transpose(a , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = np.random.randn(3 , 4 )
lowerCAmelCase__ : int = jnp.array(a )
self.assertTrue(np.allclose(transpose(a ) , np.asarray(transpose(a ) ) ) )
lowerCAmelCase__ : Any = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : str = jnp.array(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , np.asarray(transpose(a , axes=(1, 2, 0) ) ) ) )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , np.reshape(a , (4, 3) ) ) )
lowerCAmelCase__ : Tuple = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , np.reshape(a , (12, 5) ) ) )
@require_torch
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = np.random.randn(3 , 4 )
lowerCAmelCase__ : Dict = torch.tensor(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , reshape(a , (4, 3) ).numpy() ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : str = torch.tensor(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , reshape(a , (12, 5) ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
lowerCAmelCase__ : List[Any] = tf.constant(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , reshape(a , (4, 3) ).numpy() ) )
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , reshape(a , (12, 5) ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
lowerCAmelCase__ : List[str] = jnp.array(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , np.asarray(reshape(a , (4, 3) ) ) ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : Union[str, Any] = jnp.array(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , np.asarray(reshape(a , (12, 5) ) ) ) )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(a ) , np.squeeze(a ) ) )
lowerCAmelCase__ : int = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , np.squeeze(a , axis=2 ) ) )
@require_torch
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase__ : str = torch.tensor(a )
self.assertTrue(np.allclose(squeeze(a ) , squeeze(a ).numpy() ) )
lowerCAmelCase__ : Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase__ : Dict = torch.tensor(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , squeeze(a , axis=2 ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(squeeze(a ) , squeeze(a ).numpy() ) )
lowerCAmelCase__ : int = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase__ : str = tf.constant(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , squeeze(a , axis=2 ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase__ : Union[str, Any] = jnp.array(a )
self.assertTrue(np.allclose(squeeze(a ) , np.asarray(squeeze(a ) ) ) )
lowerCAmelCase__ : str = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase__ : Optional[Any] = jnp.array(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , np.asarray(squeeze(a , axis=2 ) ) ) )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , np.expand_dims(a , axis=1 ) ) )
@require_torch
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : str = np.random.randn(3 , 4 )
lowerCAmelCase__ : str = torch.tensor(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , expand_dims(a , axis=1 ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = np.random.randn(3 , 4 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , expand_dims(a , axis=1 ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = np.random.randn(3 , 4 )
lowerCAmelCase__ : Tuple = jnp.array(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , np.asarray(expand_dims(a , axis=1 ) ) ) )
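# The tests above exercise framework-agnostic tensor helpers. A minimal sketch
# of the dispatch idea (simplified and hypothetical; the real helpers in
# transformers.utils branch explicitly on torch, tf and jax types):
import numpy as np


def generic_transpose(array, axes=None):
    """Transpose numpy arrays with np.transpose; defer to .permute for torch tensors."""
    if isinstance(array, np.ndarray):
        return np.transpose(array, axes=axes)
    if axes is None:
        return array.T  # 2-D torch tensors expose .T like numpy
    return array.permute(*axes)  # torch-specific; other frameworks need their own branch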
| 307
| 0
|
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
lowerCamelCase__ = """ETAOINSHRDLCUMWFGYPBVKJXQZ"""
lowerCamelCase__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        # within a frequency bucket, order letters by reversed ETAOIN rank
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
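# Hand-traced example for the functions above (hedged): for the message
# "Hello World", L appears 3 times and O twice, so get_frequency_order begins
# "LOWDRHE..."; english_freq_match_score stays low (only O lands in the top
# six) because an 11-letter sample is far too short to match English letter
# statistics.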
| 367
|
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
lowerCamelCase__ = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
lowerCamelCase__ = concatenate_datasets
lowerCamelCase__ = DownloadConfig
lowerCamelCase__ = DownloadManager
lowerCamelCase__ = DownloadMode
lowerCamelCase__ = DownloadConfig
lowerCamelCase__ = DownloadMode
lowerCamelCase__ = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 307
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ = {"""configuration_yolos""": ["""YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """YolosConfig""", """YolosOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""YolosFeatureExtractor"""]
lowerCamelCase__ = ["""YolosImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""YolosForObjectDetection""",
"""YolosModel""",
"""YolosPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 368
|
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = DanceDiffusionPipeline
lowercase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
lowercase = PipelineTesterMixin.required_optional_params - {
'callback',
'latents',
'callback_steps',
'output_type',
'num_images_per_prompt',
}
lowercase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
lowercase = False
lowercase = False
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16_000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=a , use_timestep_embedding=a , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , )
lowerCAmelCase__ : Tuple = IPNDMScheduler()
lowerCAmelCase__ : str = {
'unet': unet,
'scheduler': scheduler,
}
return components
def _lowerCamelCase ( self : int , a : Dict , a : List[str]=0 ):
'''simple docstring'''
if str(a ).startswith('mps' ):
lowerCAmelCase__ : Union[str, Any] = torch.manual_seed(a )
else:
lowerCAmelCase__ : Optional[Any] = torch.Generator(device=a ).manual_seed(a )
lowerCAmelCase__ : Optional[Any] = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : int = self.get_dummy_components()
lowerCAmelCase__ : List[str] = DanceDiffusionPipeline(**a )
lowerCAmelCase__ : Any = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : List[str] = self.get_dummy_inputs(a )
lowerCAmelCase__ : List[Any] = pipe(**a )
lowerCAmelCase__ : List[str] = output.audios
lowerCAmelCase__ : Optional[Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
lowerCAmelCase__ : List[Any] = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch_device
lowerCAmelCase__ : List[str] = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
lowerCAmelCase__ : List[str] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Optional[int] = torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe(generator=a , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
lowerCAmelCase__ : int = output.audios
lowerCAmelCase__ : List[Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCAmelCase__ : Dict = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = torch_device
lowerCAmelCase__ : List[Any] = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.float16 )
lowerCAmelCase__ : Optional[int] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : str = torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe(generator=a , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
lowerCAmelCase__ : str = output.audios
lowerCAmelCase__ : Tuple = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCAmelCase__ : int = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
| 307
| 0
|
from math import pi
def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
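# Worked check for the call above: a 90° arc of a radius-10 circle is a quarter
# of the circumference, 2·π·10/4 = 5π ≈ 15.7080, matching arc_length(90, 10).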
| 369
|
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : Any , *a : Any , **a : Any ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Union[str, Any] , *a : Optional[int] , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : int , *a : List[Any] , **a : int ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : str , *a : Any , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : List[str] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , *a : Optional[Any] , **a : Any ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[int] , *a : List[Any] , **a : str ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : List[Any] , *a : List[str] , **a : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , *a : Union[str, Any] , **a : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *a : Dict , **a : List[str] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : Dict , **a : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : List[str] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : Dict , *a : str , **a : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Any , *a : Any , **a : Any ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Any , *a : List[Any] , **a : str ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : str , *a : Union[str, Any] , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : int , *a : Union[str, Any] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : Tuple , **a : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
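# A minimal sketch of what requires_backends does behind the dummy classes
# above (simplified and hypothetical; `available_backends` stands in for the
# real per-backend availability checks):
available_backends: set = set()  # hypothetical registry of importable backends


def requires_backends_sketch(obj, backends):
    name = obj.__name__ if hasattr(obj, "__name__") else type(obj).__name__
    missing = [b for b in backends if b not in available_backends]
    if missing:
        raise ImportError(f"{name} requires the following backends: {', '.join(missing)}")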
| 307
| 0
|
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop the last element instead of the first one
        # 2) add adjacent elements to the stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
"""A""": ["""B""", """C""", """D"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F"""],
"""D""": ["""B""", """D"""],
"""E""": ["""B""", """F"""],
"""F""": ["""C""", """E""", """G"""],
"""G""": ["""F"""],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, """A"""))
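# A recursive sketch of the same traversal (hedged, for comparison with the
# iterative version above; recursion depth is bounded by the longest explored path):
def depth_first_search_recursive(graph: dict, v: str, explored: set[str] | None = None) -> set[str]:
    if explored is None:
        explored = set()
    explored.add(v)
    for adj in graph[v]:
        if adj not in explored:
            depth_first_search_recursive(graph, adj, explored)
    return explored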
| 370
|
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class A__ :
def __init__( self : List[str] , a : Any , a : Dict=13 , a : Optional[Any]=7 , a : Tuple=True , a : Tuple=True , a : Dict=False , a : Optional[Any]=True , a : Dict=99 , a : Tuple=32 , a : Optional[Any]=5 , a : str=4 , a : Union[str, Any]=37 , a : Any="gelu" , a : Dict=0.1 , a : Any=0.1 , a : Optional[int]=512 , a : Union[str, Any]=16 , a : Optional[int]=2 , a : Optional[Any]=0.0_2 , a : List[Any]=3 , a : Any=4 , a : Optional[int]=None , ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = parent
lowerCAmelCase__ : str = batch_size
lowerCAmelCase__ : Optional[int] = seq_length
lowerCAmelCase__ : Optional[Any] = is_training
lowerCAmelCase__ : Tuple = use_input_mask
lowerCAmelCase__ : List[Any] = use_token_type_ids
lowerCAmelCase__ : str = use_labels
lowerCAmelCase__ : Dict = vocab_size
lowerCAmelCase__ : Union[str, Any] = hidden_size
lowerCAmelCase__ : Optional[int] = num_hidden_layers
lowerCAmelCase__ : List[Any] = num_attention_heads
lowerCAmelCase__ : int = intermediate_size
lowerCAmelCase__ : Union[str, Any] = hidden_act
lowerCAmelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCAmelCase__ : Any = attention_probs_dropout_prob
lowerCAmelCase__ : Dict = max_position_embeddings
lowerCAmelCase__ : int = type_vocab_size
lowerCAmelCase__ : int = type_sequence_label_size
lowerCAmelCase__ : Dict = initializer_range
lowerCAmelCase__ : List[str] = num_labels
lowerCAmelCase__ : Any = num_choices
lowerCAmelCase__ : List[Any] = scope
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : Tuple = None
if self.use_input_mask:
lowerCAmelCase__ : str = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : List[str] = None
if self.use_token_type_ids:
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ : Optional[Any] = None
lowerCAmelCase__ : Dict = None
lowerCAmelCase__ : str = None
if self.use_labels:
lowerCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self : Tuple , a : Dict , a : List[str] , a : str , a : Union[str, Any] , a : Optional[Any] , a : Dict , a : str ):
'''simple docstring'''
lowerCAmelCase__ : str = LlamaModel(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Dict = model(a , attention_mask=a )
lowerCAmelCase__ : Union[str, Any] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : int , a : Any , a : Union[str, Any] , a : Dict , a : Dict , a : List[Any] , a : Optional[Any] , a : int , a : Dict , a : Tuple , ):
'''simple docstring'''
lowerCAmelCase__ : int = True
lowerCAmelCase__ : Dict = LlamaModel(a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[Any] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , )
lowerCAmelCase__ : Optional[int] = model(
a , attention_mask=a , encoder_hidden_states=a , )
lowerCAmelCase__ : Union[str, Any] = model(a , attention_mask=a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : Union[str, Any] , a : int , a : List[Any] , a : int , a : Tuple , a : List[Any] , a : Union[str, Any] , a : Any , a : List[str] , a : List[str] , ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = LlamaForCausalLM(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Tuple = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : str , a : Any , a : Tuple , a : str , a : Union[str, Any] , a : Optional[Any] , a : List[Any] , a : Optional[Any] , a : Optional[Any] , a : List[str] , ):
'''simple docstring'''
lowerCAmelCase__ : str = True
lowerCAmelCase__ : Optional[int] = True
lowerCAmelCase__ : List[Any] = LlamaForCausalLM(config=a )
model.to(a )
model.eval()
# first forward pass
lowerCAmelCase__ : List[str] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , use_cache=a , )
lowerCAmelCase__ : List[Any] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
lowerCAmelCase__ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase__ : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the next tokens to input_ids and the attention mask
lowerCAmelCase__ : int = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase__ : Any = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase__ : Union[str, Any] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , output_hidden_states=a , )['hidden_states'][0]
lowerCAmelCase__ : Union[str, Any] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , past_key_values=a , output_hidden_states=a , )['hidden_states'][0]
# select random slice
lowerCAmelCase__ : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase__ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase__ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class A__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
lowercase = (LlamaForCausalLM,) if is_torch_available() else ()
lowercase = (
{
'feature-extraction': LlamaModel,
'text-classification': LlamaForSequenceClassification,
'text-generation': LlamaForCausalLM,
'zero-shot': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : int = 3
lowerCAmelCase__ : Dict = input_dict['input_ids']
lowerCAmelCase__ : Optional[Any] = input_ids.ne(1 ).to(a )
lowerCAmelCase__ : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase__ : Tuple = LlamaForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : str = model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : List[Any] = 3
lowerCAmelCase__ : List[str] = 'single_label_classification'
lowerCAmelCase__ : List[Any] = input_dict['input_ids']
lowerCAmelCase__ : List[Any] = input_ids.ne(1 ).to(a )
lowerCAmelCase__ : Optional[int] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase__ : int = LlamaForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Optional[int] = model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Optional[Any] = 3
lowerCAmelCase__ : Optional[Any] = 'multi_label_classification'
lowerCAmelCase__ : List[str] = input_dict['input_ids']
lowerCAmelCase__ : Tuple = input_ids.ne(1 ).to(a )
lowerCAmelCase__ : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase__ : Dict = LlamaForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Dict = model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def _lowerCamelCase ( self : Optional[int] , a : Dict ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Tuple = ids_tensor([1, 10] , config.vocab_size )
lowerCAmelCase__ : List[str] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase__ : List[Any] = LlamaModel(a )
original_model.to(a )
original_model.eval()
lowerCAmelCase__ : List[Any] = original_model(a ).last_hidden_state
lowerCAmelCase__ : str = original_model(a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase__ : Any = {'type': scaling_type, 'factor': 1_0.0}
lowerCAmelCase__ : Union[str, Any] = LlamaModel(a )
scaled_model.to(a )
scaled_model.eval()
lowerCAmelCase__ : Union[str, Any] = scaled_model(a ).last_hidden_state
lowerCAmelCase__ : Optional[int] = scaled_model(a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(a , a , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(a , a , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(a , a , atol=1E-5 ) )
@require_torch
class A__ ( unittest.TestCase ):
    @unittest.skip('Logits are not exactly the same; once we fix the instabilities, we will update!' )
@slow
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
lowerCAmelCase__ : Any = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
lowerCAmelCase__ : Dict = torch.tensor([[-6.6_5_5_0, -4.1_2_2_7, -4.9_8_5_9, -3.2_4_0_6, 0.8_2_6_2, -3.0_0_3_3, 1.2_9_6_4, -3.3_6_9_9]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : List[Any] = torch.tensor([-1_2.8_2_8_1, -7.4_4_5_3, -0.4_6_3_9, -8.0_6_2_5, -7.2_5_0_0, -8.0_0_0_0, -6.4_8_8_3, -7.7_6_9_5, -7.8_4_3_8, -7.0_3_1_2, -6.2_1_8_8, -7.1_3_2_8, -1.8_4_9_6, 1.9_9_6_1, -8.6_2_5_0, -6.7_2_2_7, -1_2.8_2_8_1, -6.9_4_9_2, -7.0_7_4_2, -7.7_8_5_2, -7.5_8_2_0, -7.9_0_6_2, -6.9_3_7_5, -7.9_8_0_5, -8.3_4_3_8, -8.1_5_6_2, -8.0_4_6_9, -7.6_2_5_0, -7.7_4_2_2, -7.3_3_9_8,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same; once we fix the instabilities, we will update!' )
@slow
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : Optional[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
lowerCAmelCase__ : List[str] = model(torch.tensor(a ) )
# Expected mean on dim = -1
lowerCAmelCase__ : Union[str, Any] = torch.tensor([[-2.0_6_2_2, -1.2_7_9_4, -1.1_6_3_8, -0.9_7_8_8, -1.4_6_0_3, -1.0_2_3_8, -1.7_8_9_3, -1.4_4_1_1]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : Any = torch.tensor([-8.1_4_0_6, -8.0_5_4_7, 2.7_4_6_1, -1.2_3_4_4, -0.1_4_4_8, -1.8_2_6_2, -1.0_0_2_0, -1.8_1_5_4, -1.6_8_9_5, -1.8_5_1_6, -2.3_5_7_4, -0.9_2_7_7, 3.7_5_9_8, 6.5_7_4_2, -1.2_9_9_8, -0.1_1_7_7, -8.1_4_0_6, -2.9_6_8_8, -2.9_1_9_9, -3.1_6_9_9, -3.5_2_5_4, -2.3_5_5_5, -2.7_9_8_8, -3.4_1_4_1, -2.8_2_6_2, -4.5_1_9_5, -3.3_3_7_9, -3.3_1_6_4, -2.7_8_3_2, -3.0_2_7_3] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same; once we fix the instabilities, we will update!' )
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : int = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : Optional[int] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
lowerCAmelCase__ : str = model(torch.tensor(a ) )
# Expected mean on dim = -1
lowerCAmelCase__ : str = torch.tensor([[-0.8_5_6_2, -1.8_5_2_0, -0.7_5_5_1, -0.4_1_6_2, -1.5_1_6_1, -1.2_0_3_8, -2.4_8_2_3, -2.3_2_5_4]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : List[str] = torch.tensor([-2.2_2_2_7, 4.8_8_2_8, 0.9_0_2_3, -0.4_5_7_8, -0.7_8_7_1, -0.1_0_3_3, -0.6_2_2_1, -0.5_7_8_6, -0.7_8_0_3, -1.0_6_7_4, -1.2_9_2_0, -0.1_5_7_0, 0.8_0_0_8, 2.0_7_2_3, -0.9_4_9_7, 0.2_7_7_1, -2.2_2_2_7, -0.7_6_1_2, -1.4_3_4_6, -1.2_0_6_1, -1.6_4_2_6, -0.3_0_0_0, -0.7_1_3_9, -1.1_9_3_4, -1.8_6_9_1, -1.6_9_7_3, -1.5_9_4_7, -1.2_7_0_5, -0.3_5_2_3, -0.5_5_1_3] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
    @unittest.skip(
        'Logits are not exactly the same; once we fix the instabilities, we will update! This is also going to be a `too_slow` test.' )
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
lowerCAmelCase__ : List[str] = model(torch.tensor(a ) )
lowerCAmelCase__ : int = torch.tensor(
[[-4.2_3_2_7, -3.3_3_6_0, -4.6_6_6_5, -4.7_6_3_1, -1.8_1_8_0, -3.4_1_7_0, -1.4_2_1_1, -3.1_8_1_0]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# fmt: off
lowerCAmelCase__ : Optional[int] = torch.tensor([-9.4_9_2_2, -3.9_5_5_1, 1.7_9_9_8, -5.6_7_5_8, -5.1_0_5_5, -5.8_9_8_4, -4.8_3_2_0, -6.8_0_8_6, -6.5_3_9_1, -5.6_1_7_2, -5.5_8_2_0, -5.5_3_5_2, 1.7_8_8_1, 3.6_2_8_9, -6.5_1_1_7, -3.4_7_8_5, -9.5_0_0_0, -6.0_3_5_2, -6.8_1_2_5, -6.0_1_9_5, -6.6_8_3_6, -5.4_7_2_7, -6.2_8_1_2, -6.0_3_9_1, -7.3_3_9_8, -7.4_2_9_7, -7.4_8_4_4, -6.5_8_2_0, -5.8_7_8_9, -5.5_3_1_2] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Model is currently gated' )
@slow
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
lowerCAmelCase__ : Tuple = 'Simply put, the theory of relativity states that '
lowerCAmelCase__ : Dict = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
lowerCAmelCase__ : Dict = tokenizer.encode(a , return_tensors='pt' )
lowerCAmelCase__ : str = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=a )
# greedy generation outputs
lowerCAmelCase__ : Optional[Any] = model.generate(a , max_new_tokens=64 , top_p=a , temperature=1 , do_sample=a )
lowerCAmelCase__ : Tuple = tokenizer.decode(generated_ids[0] , skip_special_tokens=a )
self.assertEqual(a , a )
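# ---------------------------------------------------------------------------
# Minimal stand-alone sketch (not part of the test suite above) of the cache
# consistency property the decoder test exercises: one forward pass over the
# full sequence must match incremental decoding with `past_key_values`. The
# tiny config values below are illustrative assumptions, not real checkpoints.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import torch
    from transformers import LlamaConfig, LlamaForCausalLM

    cfg = LlamaConfig(
        vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37
    )
    toy = LlamaForCausalLM(cfg).eval()
    ids = torch.randint(0, cfg.vocab_size, (1, 8))
    with torch.no_grad():
        full_logits = toy(ids).logits[:, -1]
        cache = toy(ids[:, :-1], use_cache=True).past_key_values
        step_logits = toy(ids[:, -1:], past_key_values=cache).logits[:, -1]
    assert torch.allclose(full_logits, step_logits, atol=1e-3)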
| 307
| 0
|
def hamming_distance(string_a: str, string_b: str) -> int:
    """
    Return the Hamming distance between two equal-length strings, i.e. the
    number of positions at which the corresponding characters differ.

    >>> hamming_distance("python", "pythbn")
    1
    >>> hamming_distance("karolin", "kathrin")
    3
    """
    if len(string_a) != len(string_b):
        raise ValueError('String lengths must match!')
    count = 0
    for char_a, char_b in zip(string_a, string_b):
        if char_a != char_b:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 371
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class A__ ( __magic_name__ ):
lowercase = 'unispeech'
def __init__( self : Any , a : List[Any]=32 , a : List[Any]=768 , a : Any=12 , a : List[str]=12 , a : List[Any]=3_072 , a : Any="gelu" , a : Dict=0.1 , a : List[str]=0.1 , a : List[str]=0.1 , a : Union[str, Any]=0.0 , a : str=0.0 , a : int=0.1 , a : List[str]=0.1 , a : List[Any]=0.0_2 , a : Optional[int]=1E-5 , a : Optional[int]="group" , a : Optional[Any]="gelu" , a : List[Any]=(512, 512, 512, 512, 512, 512, 512) , a : Optional[Any]=(5, 2, 2, 2, 2, 2, 2) , a : List[str]=(10, 3, 3, 3, 3, 2, 2) , a : Union[str, Any]=False , a : Union[str, Any]=128 , a : Tuple=16 , a : Dict=False , a : str=True , a : str=0.0_5 , a : Union[str, Any]=10 , a : Tuple=2 , a : int=0.0 , a : Optional[Any]=10 , a : List[str]=0 , a : str=320 , a : List[str]=2 , a : Optional[Any]=0.1 , a : Any=100 , a : Dict=256 , a : Any=256 , a : Dict=0.1 , a : List[Any]="mean" , a : Dict=False , a : str=False , a : Optional[int]=256 , a : Any=80 , a : List[Any]=0 , a : Optional[int]=1 , a : int=2 , a : List[Any]=0.5 , **a : int , ):
'''simple docstring'''
super().__init__(**a , pad_token_id=a , bos_token_id=a , eos_token_id=a )
lowerCAmelCase__ : List[str] = hidden_size
lowerCAmelCase__ : List[str] = feat_extract_norm
lowerCAmelCase__ : Optional[Any] = feat_extract_activation
lowerCAmelCase__ : str = list(a )
lowerCAmelCase__ : List[str] = list(a )
lowerCAmelCase__ : Tuple = list(a )
lowerCAmelCase__ : Dict = conv_bias
lowerCAmelCase__ : Optional[int] = num_conv_pos_embeddings
lowerCAmelCase__ : Any = num_conv_pos_embedding_groups
lowerCAmelCase__ : str = len(self.conv_dim )
lowerCAmelCase__ : Any = num_hidden_layers
lowerCAmelCase__ : Dict = intermediate_size
lowerCAmelCase__ : Dict = hidden_act
lowerCAmelCase__ : Union[str, Any] = num_attention_heads
lowerCAmelCase__ : Union[str, Any] = hidden_dropout
lowerCAmelCase__ : Tuple = attention_dropout
lowerCAmelCase__ : str = activation_dropout
lowerCAmelCase__ : Any = feat_proj_dropout
lowerCAmelCase__ : List[Any] = final_dropout
lowerCAmelCase__ : Tuple = layerdrop
lowerCAmelCase__ : Any = layer_norm_eps
lowerCAmelCase__ : Dict = initializer_range
lowerCAmelCase__ : Optional[Any] = num_ctc_classes
lowerCAmelCase__ : Tuple = vocab_size
lowerCAmelCase__ : Dict = do_stable_layer_norm
lowerCAmelCase__ : List[Any] = use_weighted_layer_sum
lowerCAmelCase__ : Any = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase__ : Union[str, Any] = apply_spec_augment
lowerCAmelCase__ : Any = mask_time_prob
lowerCAmelCase__ : Dict = mask_time_length
lowerCAmelCase__ : Tuple = mask_time_min_masks
lowerCAmelCase__ : Optional[int] = mask_feature_prob
lowerCAmelCase__ : Optional[Any] = mask_feature_length
lowerCAmelCase__ : int = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCAmelCase__ : int = num_codevectors_per_group
lowerCAmelCase__ : Any = num_codevector_groups
lowerCAmelCase__ : Any = contrastive_logits_temperature
lowerCAmelCase__ : int = feat_quantizer_dropout
lowerCAmelCase__ : List[Any] = num_negatives
lowerCAmelCase__ : List[str] = codevector_dim
lowerCAmelCase__ : Optional[int] = proj_codevector_dim
lowerCAmelCase__ : Dict = diversity_loss_weight
# ctc loss
lowerCAmelCase__ : Any = ctc_loss_reduction
lowerCAmelCase__ : Any = ctc_zero_infinity
# pretraining loss
lowerCAmelCase__ : Union[str, Any] = replace_prob
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
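# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original file). The property
# above multiplies the convolutional strides together, i.e. the factor by
# which the feature encoder downsamples raw audio. With the default strides
# (5, 2, 2, 2, 2, 2, 2) that is 5 * 2**6 = 320 input samples per output frame.
# The class and property names below are the assumed public transformers names.
# ---------------------------------------------------------------------------
# from transformers import UniSpeechConfig
# config = UniSpeechConfig()
# assert config.inputs_to_logits_ratio == 320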
| 307
| 0
|
def is_palindrome(num: int) -> bool:
    """
    Return True if `num` reads the same forwards and backwards.

    >>> is_palindrome(121)
    True
    >>> is_palindrome(-121)
    False
    >>> is_palindrome(10)
    False
    """
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
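# A string-based alternative for comparison (a sketch, not part of the
# original module):
# def is_palindrome_str(num: int) -> bool:
#     return num >= 0 and str(num) == str(num)[::-1]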
| 350
|
import torch
from torch import nn
class A__ ( nn.Module ):
def __init__( self : Optional[int] , a : Union[str, Any] , a : str , a : str , a : List[Any] , a : List[Any]=1 , a : Tuple=False ):
'''simple docstring'''
super().__init__()
lowerCAmelCase__ : Dict = n_token
lowerCAmelCase__ : Any = d_embed
lowerCAmelCase__ : str = d_proj
lowerCAmelCase__ : int = cutoffs + [n_token]
lowerCAmelCase__ : Union[str, Any] = [0] + self.cutoffs
lowerCAmelCase__ : str = div_val
lowerCAmelCase__ : Tuple = self.cutoffs[0]
lowerCAmelCase__ : Dict = len(self.cutoffs ) - 1
lowerCAmelCase__ : Any = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
lowerCAmelCase__ : int = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
lowerCAmelCase__ : Optional[Any] = nn.Parameter(torch.zeros(self.n_clusters ) )
lowerCAmelCase__ : Optional[int] = nn.ModuleList()
lowerCAmelCase__ : Tuple = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(a , a ) ) )
else:
self.out_projs.append(a )
self.out_layers.append(nn.Linear(a , a ) )
else:
for i in range(len(self.cutoffs ) ):
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : Optional[Any] = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(a , a ) ) )
self.out_layers.append(nn.Linear(a , r_idx - l_idx ) )
lowerCAmelCase__ : Tuple = keep_order
def _lowerCamelCase ( self : Optional[int] , a : List[str] , a : int , a : List[str] , a : str ):
'''simple docstring'''
if proj is None:
lowerCAmelCase__ : Tuple = nn.functional.linear(a , a , bias=a )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
lowerCAmelCase__ : int = nn.functional.linear(a , proj.t().contiguous() )
lowerCAmelCase__ : Tuple = nn.functional.linear(a , a , bias=a )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def _lowerCamelCase ( self : List[str] , a : List[Any] , a : Optional[int]=None , a : Tuple=False ):
'''simple docstring'''
if labels is not None:
# Shift so that tokens < n predict n
lowerCAmelCase__ : str = hidden[..., :-1, :].contiguous()
lowerCAmelCase__ : Optional[Any] = labels[..., 1:].contiguous()
lowerCAmelCase__ : List[Any] = hidden.view(-1 , hidden.size(-1 ) )
lowerCAmelCase__ : Tuple = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
else:
lowerCAmelCase__ : Optional[Any] = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
lowerCAmelCase__ : Optional[Any] = self._compute_logit(a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
lowerCAmelCase__ : str = labels != -100
lowerCAmelCase__ : int = torch.zeros_like(a , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase__ : List[str] = (
-nn.functional.log_softmax(a , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
lowerCAmelCase__ : Optional[Any] = nn.functional.log_softmax(a , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase__ , lowerCAmelCase__ : int = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : Any = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase__ : Any = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase__ : Optional[Any] = self.out_layers[i].weight
lowerCAmelCase__ : Optional[int] = self.out_layers[i].bias
if i == 0:
lowerCAmelCase__ : Dict = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase__ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(a )
biases.append(a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase__ : List[Any] = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : Union[str, Any] = nn.functional.log_softmax(a , dim=1 )
if labels is None:
lowerCAmelCase__ : Tuple = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
lowerCAmelCase__ : Dict = torch.zeros_like(a , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase__ : Tuple = 0
lowerCAmelCase__ : Union[str, Any] = [0] + self.cutoffs
for i in range(len(a ) - 1 ):
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
lowerCAmelCase__ : Tuple = (labels >= l_idx) & (labels < r_idx)
lowerCAmelCase__ : int = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
lowerCAmelCase__ : Tuple = labels.index_select(0 , a ) - l_idx
lowerCAmelCase__ : Any = head_logprob.index_select(0 , a )
lowerCAmelCase__ : Optional[int] = hidden.index_select(0 , a )
else:
lowerCAmelCase__ : Any = hidden
if i == 0:
if labels is not None:
lowerCAmelCase__ : Union[str, Any] = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase__ : List[str] = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase__ : Union[str, Any] = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : Optional[int] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : List[Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
lowerCAmelCase__ : List[str] = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase__ : Tuple = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
lowerCAmelCase__ : Union[str, Any] = logprob_i
if labels is not None:
if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order:
out.index_copy_(0 , a , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def _lowerCamelCase ( self : List[Any] , a : Any ):
'''simple docstring'''
if self.n_clusters == 0:
lowerCAmelCase__ : Union[str, Any] = self._compute_logit(a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(a , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase__ , lowerCAmelCase__ : str = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : str = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase__ : Dict = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase__ : int = self.out_layers[i].weight
lowerCAmelCase__ : int = self.out_layers[i].bias
if i == 0:
lowerCAmelCase__ : Optional[int] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase__ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(a )
biases.append(a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase__ : Dict = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : List[Any] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
lowerCAmelCase__ : Optional[Any] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : List[Any] = [0] + self.cutoffs
for i in range(len(a ) - 1 ):
lowerCAmelCase__ , lowerCAmelCase__ : str = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
lowerCAmelCase__ : Union[str, Any] = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase__ : Dict = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : List[str] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : Dict = head_logprob[:, -i] + tail_logprob_i
lowerCAmelCase__ : List[str] = logprob_i
return out
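# ---------------------------------------------------------------------------
# Usage sketch (illustrative; the class above corresponds to Transformer-XL's
# ProjectedAdaptiveLogSoftmax, kept here under its placeholder name). With
# labels, the forward pass returns per-token negative log-likelihoods after
# shifting; without labels, log-probabilities over the full vocabulary.
# ---------------------------------------------------------------------------
# crit = A__(n_token=1000, d_embed=64, d_proj=64, cutoffs=[100, 500], div_val=1)
# hidden = torch.randn(2, 8, 64)              # (batch, seq_len, d_proj)
# labels = torch.randint(0, 1000, (2, 8))
# nll = crit(hidden, labels)                  # shape: (2 * 7,) after shifting
# logprobs = crit(hidden)                     # shape: (16, 1000) log-probs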
| 307
| 0
|
import numpy as np
from PIL import Image
def maxpooling(arr: list[list[int]], size: int, stride: int) -> np.ndarray:
    """Slide a `size` x `size` window over a square matrix, keeping the max."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
def avgpooling(arr: list[list[int]], size: int, stride: int) -> np.ndarray:
    """Slide a `size` x `size` window over a square matrix, keeping the average."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)
    # Loading the image
    image = Image.open("path_to_image")
    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
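# Quick numeric check (a sketch, not part of the original module):
# >>> maxpooling([[1, 2, 3, 4],
# ...             [5, 6, 7, 8],
# ...             [9, 10, 11, 12],
# ...             [13, 14, 15, 16]], size=2, stride=2)
# array([[ 6.,  8.],
#        [14., 16.]])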
| 351
|
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
lowerCamelCase__ = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("""""", """|""", """|"""),
datarow=DataRow("""""", """|""", """|"""),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]
total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f"{line['duration']:.4f}"
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
message = ""
all_filesafailed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
    date_report = {
        "type": "context",
        "elements": [
            {
                "type": "plain_text",
                "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
            }
        ],
    }
    payload.append(date_report)
    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]
    for failed_file in all_filesafailed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ""
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ""
            payload = {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                },
            }
            client.chat_postMessage(
                channel="#accelerate-ci-daily",
                thread_ts=ts,
                blocks=[payload],
            )
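# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the CI script): the custom TableFormat at
# the top renders borderless, pipe-separated rows that read cleanly inside a
# Slack code block. The exact output shape below is approximate:
#   tabulate([["test_modeling_bert.py", 2]],
#            headers=["Test Location", "Num Failed"],
#            tablefmt=hf_table_format, stralign="right")
# ---------------------------------------------------------------------------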
| 307
| 0
|
import os
import sys
import unittest
lowerCamelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowerCamelCase__ = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""")
lowerCamelCase__ = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""")
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Any = get_test_to_tester_mapping(a )
lowerCAmelCase__ : Any = get_test_to_tester_mapping(a )
lowerCAmelCase__ : Optional[Any] = {'BertModelTest': 'BertModelTester'}
lowerCAmelCase__ : List[str] = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
self.assertEqual(get_test_info.to_json(a ) , a )
self.assertEqual(get_test_info.to_json(a ) , a )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : str = get_model_to_test_mapping(a )
lowerCAmelCase__ : Dict = get_model_to_test_mapping(a )
lowerCAmelCase__ : str = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
lowerCAmelCase__ : List[str] = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
self.assertEqual(get_test_info.to_json(a ) , a )
self.assertEqual(get_test_info.to_json(a ) , a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = get_model_to_tester_mapping(a )
lowerCAmelCase__ : Union[str, Any] = get_model_to_tester_mapping(a )
lowerCAmelCase__ : Union[str, Any] = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
lowerCAmelCase__ : List[str] = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
self.assertEqual(get_test_info.to_json(a ) , a )
self.assertEqual(get_test_info.to_json(a ) , a )
| 352
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
def lowerCAmelCase__ ( ) -> Dict:
lowerCAmelCase__ : List[Any] = cn.convert_to_negative(SCREAMING_SNAKE_CASE_ )
# assert negative_img array for at least one True
assert negative_img.any()
def lowerCAmelCase__ ( ) -> Optional[Any]:
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(SCREAMING_SNAKE_CASE_ , 110 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def lowerCAmelCase__ ( ) -> Tuple:
lowerCAmelCase__ : str = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def lowerCAmelCase__ ( ) -> Tuple:
lowerCAmelCase__ : Tuple = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
lowerCAmelCase__ : Optional[Any] = canny.canny(SCREAMING_SNAKE_CASE_ )
# assert canny array for at least one True
assert canny_array.any()
def lowerCAmelCase__ ( ) -> Optional[int]:
assert gg.gaussian_filter(SCREAMING_SNAKE_CASE_ , 5 , sigma=0.9 ).all()
def lowerCAmelCase__ ( ) -> Dict:
# laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
assert res.any()
def lowerCAmelCase__ ( ) -> List[str]:
assert med.median_filter(SCREAMING_SNAKE_CASE_ , 3 ).any()
def lowerCAmelCase__ ( ) -> Any:
lowerCAmelCase__ , lowerCAmelCase__ : str = sob.sobel_filter(SCREAMING_SNAKE_CASE_ )
assert grad.any() and theta.any()
def lowerCAmelCase__ ( ) -> Any:
lowerCAmelCase__ : int = sp.make_sepia(SCREAMING_SNAKE_CASE_ , 20 )
assert sepia.all()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = "digital_image_processing/image_data/lena_small.jpg" ) -> Optional[Any]:
lowerCAmelCase__ : List[Any] = bs.Burkes(imread(SCREAMING_SNAKE_CASE_ , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = "digital_image_processing/image_data/lena_small.jpg" , ) -> Any:
lowerCAmelCase__ : Dict = rs.NearestNeighbour(imread(SCREAMING_SNAKE_CASE_ , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def lowerCAmelCase__ ( ) -> int:
lowerCAmelCase__ : int = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
lowerCAmelCase__ : List[str] = imread(SCREAMING_SNAKE_CASE_ , 0 )
# Test for get_neighbors_pixel function() return not None
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : List[str] = image[x_coordinate][y_coordinate]
lowerCAmelCase__ : Dict = lbp.get_neighbors_pixel(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
lowerCAmelCase__ : List[str] = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
lowerCAmelCase__ : Dict = lbp.local_binary_value(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert lbp_image.any()
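# ---------------------------------------------------------------------------
# Minimal sketch (assumed API, not part of the test module above): the LBP
# value of the centre pixel of a small patch, using the same helper the last
# test loops over the whole image.
# ---------------------------------------------------------------------------
# patch = np.zeros((3, 3))
# patch[1][1] = 5  # centre brighter than all neighbours
# print(lbp.local_binary_value(patch, 1, 1))  # expected 0: no neighbour >= centre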
| 307
| 0
|
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = BertTokenizer
lowercase = BertTokenizerFast
lowercase = True
lowercase = True
lowercase = filter_non_english
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
super().setUp()
lowerCAmelCase__ : Union[str, Any] = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
lowerCAmelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def _lowerCamelCase ( self : Union[str, Any] , a : int ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = 'UNwant\u00E9d,running'
lowerCAmelCase__ : int = 'unwanted, running'
return input_text, output_text
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : str = self.tokenizer_class(self.vocab_file )
lowerCAmelCase__ : Optional[int] = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(a , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , [9, 6, 7, 12, 10, 11] )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowerCAmelCase__ : Optional[Any] = self.get_tokenizer()
lowerCAmelCase__ : List[str] = self.get_rust_tokenizer()
lowerCAmelCase__ : Union[str, Any] = 'UNwant\u00E9d,running'
lowerCAmelCase__ : List[Any] = tokenizer.tokenize(a )
lowerCAmelCase__ : int = rust_tokenizer.tokenize(a )
self.assertListEqual(a , a )
lowerCAmelCase__ : Optional[int] = tokenizer.encode(a , add_special_tokens=a )
lowerCAmelCase__ : Dict = rust_tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
lowerCAmelCase__ : str = self.get_rust_tokenizer()
lowerCAmelCase__ : Optional[Any] = tokenizer.encode(a )
lowerCAmelCase__ : str = rust_tokenizer.encode(a )
self.assertListEqual(a , a )
# With lower casing
lowerCAmelCase__ : int = self.get_tokenizer(do_lower_case=a )
lowerCAmelCase__ : List[str] = self.get_rust_tokenizer(do_lower_case=a )
lowerCAmelCase__ : Dict = 'UNwant\u00E9d,running'
lowerCAmelCase__ : Dict = tokenizer.tokenize(a )
lowerCAmelCase__ : Optional[int] = rust_tokenizer.tokenize(a )
self.assertListEqual(a , a )
lowerCAmelCase__ : List[Any] = tokenizer.encode(a , add_special_tokens=a )
lowerCAmelCase__ : Optional[int] = rust_tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
lowerCAmelCase__ : str = self.get_rust_tokenizer()
lowerCAmelCase__ : Any = tokenizer.encode(a )
lowerCAmelCase__ : Optional[Any] = rust_tokenizer.encode(a )
self.assertListEqual(a , a )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = BasicTokenizer(do_lower_case=a )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = BasicTokenizer(do_lower_case=a , strip_accents=a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = BasicTokenizer(do_lower_case=a , strip_accents=a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : str = BasicTokenizer(do_lower_case=a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : str = BasicTokenizer(do_lower_case=a )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = BasicTokenizer(do_lower_case=a , strip_accents=a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = BasicTokenizer(do_lower_case=a , strip_accents=a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Any = BasicTokenizer(do_lower_case=a , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = BasicTokenizer()
lowerCAmelCase__ : Tuple = 'a\n\'ll !!to?\'d of, can\'t.'
lowerCAmelCase__ : Optional[Any] = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.']
self.assertListEqual(tokenizer.tokenize(a ) , a )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
lowerCAmelCase__ : Dict = {}
for i, token in enumerate(a ):
lowerCAmelCase__ : Union[str, Any] = i
lowerCAmelCase__ : Optional[Any] = WordpieceTokenizer(vocab=a , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.get_tokenizer()
lowerCAmelCase__ : Union[str, Any] = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])
@slow
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.tokenizer_class.from_pretrained('bert-base-uncased' )
lowerCAmelCase__ : List[Any] = tokenizer.encode('sequence builders' , add_special_tokens=a )
lowerCAmelCase__ : Optional[Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=a )
lowerCAmelCase__ : List[str] = tokenizer.build_inputs_with_special_tokens(a )
lowerCAmelCase__ : Dict = tokenizer.build_inputs_with_special_tokens(a , a )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase__ : Tuple = self.rust_tokenizer_class.from_pretrained(a , **a )
lowerCAmelCase__ : Optional[int] = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
lowerCAmelCase__ : List[Any] = tokenizer_r.encode_plus(
a , return_attention_mask=a , return_token_type_ids=a , return_offsets_mapping=a , add_special_tokens=a , )
lowerCAmelCase__ : Tuple = tokenizer_r.do_lower_case if hasattr(a , 'do_lower_case' ) else False
lowerCAmelCase__ : Optional[Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = ['的', '人', '有']
lowerCAmelCase__ : Optional[Any] = ''.join(a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase__ : str = True
lowerCAmelCase__ : Union[str, Any] = self.tokenizer_class.from_pretrained(a , **a )
lowerCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(a , **a )
lowerCAmelCase__ : List[str] = tokenizer_p.encode(a , add_special_tokens=a )
lowerCAmelCase__ : Dict = tokenizer_r.encode(a , add_special_tokens=a )
lowerCAmelCase__ : str = tokenizer_r.convert_ids_to_tokens(a )
lowerCAmelCase__ : Tuple = tokenizer_p.convert_ids_to_tokens(a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(a , a )
self.assertListEqual(a , a )
lowerCAmelCase__ : Any = False
lowerCAmelCase__ : Any = self.rust_tokenizer_class.from_pretrained(a , **a )
lowerCAmelCase__ : Any = self.tokenizer_class.from_pretrained(a , **a )
lowerCAmelCase__ : int = tokenizer_r.encode(a , add_special_tokens=a )
lowerCAmelCase__ : Optional[Any] = tokenizer_p.encode(a , add_special_tokens=a )
lowerCAmelCase__ : Tuple = tokenizer_r.convert_ids_to_tokens(a )
lowerCAmelCase__ : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(a )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCAmelCase__ : Optional[int] = [
f'''##{token}''' if idx != 0 else token for idx, token in enumerate(a )
]
self.assertListEqual(a , a )
self.assertListEqual(a , a )
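# ---------------------------------------------------------------------------
# Minimal sketch (requires network access; not part of the test class above):
# the offset mapping checked by the tests maps each produced token back to a
# character span in the raw input string.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    tok = BertTokenizerFast.from_pretrained('bert-base-uncased')
    enc = tok('A, naïve sentence.', return_offsets_mapping=True)
    for (start, end), token_id in zip(enc['offset_mapping'], enc['input_ids']):
        print((start, end), tok.convert_ids_to_tokens(token_id))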
| 353
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
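# ---------------------------------------------------------------------------
# For illustration only (not part of the original __init__): a stripped-down
# version of the lazy-import mechanism used above. Attribute access triggers
# the real import, so importing the package stays cheap until a class from a
# heavy backend (torch/TF) is actually touched.
# ---------------------------------------------------------------------------
# import importlib
# import types
#
# class MiniLazyModule(types.ModuleType):
#     def __init__(self, name, import_structure):
#         super().__init__(name)
#         self._import_structure = import_structure
#
#     def __getattr__(self, attr):
#         for submodule, names in self._import_structure.items():
#             if attr in names:
#                 module = importlib.import_module('.' + submodule, self.__name__)
#                 return getattr(module, attr)
#         raise AttributeError(f'module {self.__name__!r} has no attribute {attr!r}')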
| 307
| 0
|
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """Compute (a ** n) % mod using O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod
# a prime number
p = 701
a = 10_0000_0000
b = 10
# using binary exponentiation function, O(log(p)):
print((a // b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a // b) % p == (a * b ** (p - 2)) % p)
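# Illustration (added, not part of the original algorithm): Python's built-in
# three-argument pow() also performs modular exponentiation, so it can serve
# as a reference implementation for the function above.
assert binary_exponentiation(b, p - 2, p) == pow(b, p - 2, p)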
| 354
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine") -> torch.Tensor:
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
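# Illustration (added): a quick sanity check of the schedule above -- with the
# cosine transform, alpha_bar is decreasing, so every beta lies in (0, max_beta]
# and the tensor has one entry per diffusion timestep.
if __name__ == "__main__":
    _betas = betas_for_alpha_bar(10)
    assert _betas.shape == (10,)
    assert 0.0 < float(_betas.min()) and float(_betas.max()) <= 0.999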
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1_000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        # Scale the denoising model input to match schedulers that expect
        # variance-expanded samples.
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t

    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
        """Constructs the noise schedule of Karras et al. (2022)."""
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order(self):
        return self.dt is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples
def __len__( self : int ):
'''simple docstring'''
return self.config.num_train_timesteps
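# Illustration (added; a minimal sketch, not part of the scheduler above): the
# step() method follows Heun's second-order method -- an Euler predictor step
# followed by averaging the slopes at both ends of the interval. The same
# pattern on a plain ODE dy/dt = -y:
def _heun_demo(y0: float = 1.0, dt: float = 0.1, steps: int = 10) -> float:
    y = y0
    for _ in range(steps):
        d1 = -y  # slope at the current point (first-order part)
        y_pred = y + d1 * dt  # Euler predictor
        d2 = -y_pred  # slope at the predicted point
        y = y + 0.5 * (d1 + d2) * dt  # corrector: average of the two slopes
    return y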
| 307
| 0
|
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force of given magnitude and direction into x and y components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1) -> bool:
    """Check static equilibrium by summing the moments of all forces about the origin."""
    # moment of each force about the origin (z component of r x F in 2D)
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15_600], [0, -12_400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
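    # Illustration (added): a trivially balanced system -- two equal and
    # opposite forces applied at the same point have zero net moment about
    # the origin, so it should always be reported as in equilibrium.
    forces = array([[1.0, 0.0], [-1.0, 0.0]])
    location = array([[0.0, 0.0], [0.0, 0.0]])
    assert in_static_equilibrium(forces, location)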
| 355
|
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
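# Illustration (added): to_atuple() normalizes scalar sizes to (h, w) pairs,
# so image_size/patch_size may be given either as an int or as a tuple.
assert to_atuple(7) == (7, 7)
assert to_atuple((7, 5)) == (7, 5)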
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass
    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = TFVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
    def test_vision_text_dual_encoder_model(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_vision_text_dual_encoder(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
| 307
| 0
|
import os
def solution() -> str:
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
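    # Illustration (added): the same "first ten digits of a big sum" logic on a
    # tiny in-memory example, so it can be checked without num.txt on disk.
    nums = ["12345678901234567890", "98765432109876543210"]
    assert str(sum(int(n) for n in nums))[:10] == "1111111110"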
| 356
|
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
lowerCamelCase__ = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
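# Illustration (added): accuracy() counts how many argmax predictions match
# the labels; in this toy batch both rows are classified correctly.
assert accuracy(np.array([[0.9, 0.1], [0.2, 0.8]]), np.array([0, 1])) == 2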
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Turn each (story, cont1, cont2, label) row into a pair of padded candidate sequences."""
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
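# Illustration (added; toy token ids): every (story, cont1, cont2, label) row
# becomes a pair of candidate sequences, so input_ids comes out with shape
# (n_batch, 2, input_len).
_toy_tensors = pre_process_datasets([[([1, 2], [3], [4], 0)]], 7, 5, 10, 11, 12)
assert _toy_tensors[0][0].shape == (1, 2, 7)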
def main():
    parser = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=SCREAMING_SNAKE_CASE_ , default='openai-gpt' , help='pretrained model name' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' )
parser.add_argument(
'--output_dir' , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument('--train_dataset' , type=SCREAMING_SNAKE_CASE_ , default='' )
parser.add_argument('--eval_dataset' , type=SCREAMING_SNAKE_CASE_ , default='' )
parser.add_argument('--seed' , type=SCREAMING_SNAKE_CASE_ , default=42 )
parser.add_argument('--num_train_epochs' , type=SCREAMING_SNAKE_CASE_ , default=3 )
parser.add_argument('--train_batch_size' , type=SCREAMING_SNAKE_CASE_ , default=8 )
parser.add_argument('--eval_batch_size' , type=SCREAMING_SNAKE_CASE_ , default=16 )
parser.add_argument('--adam_epsilon' , default=1e-8 , type=SCREAMING_SNAKE_CASE_ , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , type=SCREAMING_SNAKE_CASE_ , default=1 )
parser.add_argument(
'--max_steps' , default=-1 , type=SCREAMING_SNAKE_CASE_ , help=(
'If > 0: set total number of training steps to perform. Override num_train_epochs.'
) , )
parser.add_argument(
'--gradient_accumulation_steps' , type=SCREAMING_SNAKE_CASE_ , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--learning_rate' , type=SCREAMING_SNAKE_CASE_ , default=6.25e-5 )
parser.add_argument('--warmup_steps' , default=0 , type=SCREAMING_SNAKE_CASE_ , help='Linear warmup over warmup_steps.' )
parser.add_argument('--lr_schedule' , type=SCREAMING_SNAKE_CASE_ , default='warmup_linear' )
parser.add_argument('--weight_decay' , type=SCREAMING_SNAKE_CASE_ , default=0.01 )
parser.add_argument('--lm_coef' , type=SCREAMING_SNAKE_CASE_ , default=0.9 )
parser.add_argument('--n_valid' , type=SCREAMING_SNAKE_CASE_ , default=374 )
parser.add_argument('--server_ip' , type=SCREAMING_SNAKE_CASE_ , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=SCREAMING_SNAKE_CASE_ , default='' , help='Can be used for distant debugging.' )
    args = parser.parse_args()
print(SCREAMING_SNAKE_CASE_ )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE_ )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)
# Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object."""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]
logger.info('Encoding dataset...' )
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)
# Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
lowerCAmelCase__ : int = pre_process_datasets(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = tensor_datasets[0], tensor_datasets[1]
lowerCAmelCase__ : str = TensorDataset(*SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Tuple = RandomSampler(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Tuple = DataLoader(SCREAMING_SNAKE_CASE_ , sampler=SCREAMING_SNAKE_CASE_ , batch_size=args.train_batch_size )
lowerCAmelCase__ : Optional[Any] = TensorDataset(*SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Dict = SequentialSampler(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = DataLoader(SCREAMING_SNAKE_CASE_ , sampler=SCREAMING_SNAKE_CASE_ , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
lowerCAmelCase__ : Union[str, Any] = args.max_steps
lowerCAmelCase__ : int = args.max_steps // (len(SCREAMING_SNAKE_CASE_ ) // args.gradient_accumulation_steps) + 1
else:
lowerCAmelCase__ : Optional[Any] = len(SCREAMING_SNAKE_CASE_ ) // args.gradient_accumulation_steps * args.num_train_epochs
lowerCAmelCase__ : Optional[int] = list(model.named_parameters() )
lowerCAmelCase__ : Tuple = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
lowerCAmelCase__ : str = [
{
'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'weight_decay': args.weight_decay,
},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
]
lowerCAmelCase__ : Union[str, Any] = AdamW(SCREAMING_SNAKE_CASE_ , lr=args.learning_rate , eps=args.adam_epsilon )
lowerCAmelCase__ : int = get_linear_schedule_with_warmup(
SCREAMING_SNAKE_CASE_ , num_warmup_steps=args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE_ )
if args.do_train:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ):
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : int = 0
lowerCAmelCase__ : str = tqdm(SCREAMING_SNAKE_CASE_ , desc='Training' )
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : Union[str, Any] = tuple(t.to(SCREAMING_SNAKE_CASE_ ) for t in batch )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = batch
lowerCAmelCase__ : Tuple = model(SCREAMING_SNAKE_CASE_ , mc_token_ids=SCREAMING_SNAKE_CASE_ , lm_labels=SCREAMING_SNAKE_CASE_ , mc_labels=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : str = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
lowerCAmelCase__ : Optional[int] = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
lowerCAmelCase__ : List[str] = 'Training loss: {:.2e} lr: {:.2e}'.format(SCREAMING_SNAKE_CASE_ , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
lowerCAmelCase__ : Optional[int] = model.module if hasattr(SCREAMING_SNAKE_CASE_ , 'module' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
lowerCAmelCase__ : List[str] = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : List[str] = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE_ )
torch.save(model_to_save.state_dict() , SCREAMING_SNAKE_CASE_ )
model_to_save.config.to_json_file(SCREAMING_SNAKE_CASE_ )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
lowerCAmelCase__ : Dict = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
lowerCAmelCase__ : List[Any] = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(SCREAMING_SNAKE_CASE_ )
if args.do_eval:
model.eval()
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = 0, 0
lowerCAmelCase__ , lowerCAmelCase__ : Any = 0, 0
for batch in tqdm(SCREAMING_SNAKE_CASE_ , desc='Evaluating' ):
lowerCAmelCase__ : str = tuple(t.to(SCREAMING_SNAKE_CASE_ ) for t in batch )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Tuple = batch
with torch.no_grad():
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = model(
SCREAMING_SNAKE_CASE_ , mc_token_ids=SCREAMING_SNAKE_CASE_ , lm_labels=SCREAMING_SNAKE_CASE_ , mc_labels=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = mc_logits.detach().cpu().numpy()
lowerCAmelCase__ : List[Any] = mc_labels.to('cpu' ).numpy()
lowerCAmelCase__ : str = accuracy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
lowerCAmelCase__ : Optional[int] = eval_loss / nb_eval_steps
lowerCAmelCase__ : Any = eval_accuracy / nb_eval_examples
lowerCAmelCase__ : Union[str, Any] = tr_loss / nb_tr_steps if args.do_train else None
lowerCAmelCase__ : Tuple = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
lowerCAmelCase__ : Dict = os.path.join(args.output_dir , 'eval_results.txt' )
with open(SCREAMING_SNAKE_CASE_ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , SCREAMING_SNAKE_CASE_ , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 307
| 0
|
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)
    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def get_failure_array(pattern: str) -> list[int]:
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
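    # Illustration (added): failure[k] is the length of the longest proper
    # prefix of pattern[: k + 1] that is also its suffix -- this is what lets
    # kmp() resume matching without re-scanning the text.
    assert get_failure_array("ababab") == [0, 0, 1, 2, 3, 4]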
| 357
|
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCamelCase__ = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
| 307
| 0
|
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
lowerCamelCase__ = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 358
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
| 307
| 0
|
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        # Informer arguments
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
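# Usage sketch (illustrative; upstream this configuration is exported as
# `transformers.InformerConfig`, which is an assumption about the de-obfuscated name):
#
#   from transformers import InformerConfig
#   config = InformerConfig(prediction_length=24, context_length=48)
#   print(config.d_model, config.encoder_layers)  # 64 2 with the defaults above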
| 359
|
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
b"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
b"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    if os.name == "nt":
        import msvcrt
        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
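# Usage sketch (illustrative; commented out because it blocks on stdin):
# `get_character()` returns either a printable character or a KEYMAP code,
# so a caller can drive a simple menu loop:
#
#   while True:
#       key = get_character()
#       if key == chr(KEYMAP["interrupt"]):
#           break
#       print(repr(key))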
| 307
| 0
|
"""simple docstring"""
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"
    def __init__(self, transformerDimSize=1_024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)
class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig
    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )
    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
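# Usage sketch (illustrative): the forward pass mean-pools the XLM-R token
# embeddings with the attention mask before the linear projection, so text
# embeddings land in the same space as CLIP image embeddings. The tokenizer
# and pretrained weights below are assumptions, not part of this file:
#
#   tokens = tokenizer(["a photo of a cat"], return_tensors="pt")
#   text_embs, token_embs = model(tokens["input_ids"], tokens["attention_mask"])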
| 360
|
def base16_encode(data: bytes) -> str:
    # Turn each byte into its two-digit uppercase hexadecimal representation.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])
def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set('0123456789ABCDEF' ):
        raise ValueError(
            'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
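if __name__ == "__main__":
    # Round-trip sanity check (illustrative):
    assert base16_encode(b"Hello") == "48656C6C6F"
    assert base16_decode("48656C6C6F") == b"Hello"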
| 307
| 0
|
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}
# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
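# Typical invocation (assumed CLI shape; the script name is illustrative,
# the flags come from InitializationArguments):
#
#   python initialize_model.py --config_name gpt2-large \
#       --tokenizer_name codeparrot/codeparrot --model_name codeparrot-model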
| 361
|
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result
def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()
def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
print_all_state(total_list)
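# Illustrative output for n=4, k=2 — each 2-subset of {1, ..., 4} on its own line:
# 1 2
# 1 3
# 1 4
# 2 3
# 2 4
# 3 4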
| 307
| 0
|
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester ( unittest.TestCase ):
def __init__( self : List[str] , a : Optional[int] , a : int=2 , a : Union[str, Any]=56 , a : List[Any]=True , a : Dict=True , a : List[Any]=True , a : Optional[int]=True , a : Tuple=99 , a : Any=32 , a : Any=2 , a : Optional[int]=2 , a : List[Any]=7 , a : Optional[int]="gelu_new" , a : Any=0.1 , a : Tuple=0.1 , a : str=512 , a : str=16 , a : Optional[Any]=2 , a : List[str]=0.0_2 , a : Tuple=4 , a : Optional[int]="block_sparse" , a : List[str]=True , a : str=False , a : Any=2 , a : Optional[int]=3 , ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = parent
lowerCAmelCase__ : Optional[Any] = batch_size
lowerCAmelCase__ : Optional[int] = seq_length
lowerCAmelCase__ : List[Any] = is_training
lowerCAmelCase__ : Tuple = use_attention_mask
lowerCAmelCase__ : str = use_token_type_ids
lowerCAmelCase__ : str = use_labels
lowerCAmelCase__ : Tuple = vocab_size
lowerCAmelCase__ : Any = hidden_size
lowerCAmelCase__ : str = num_hidden_layers
lowerCAmelCase__ : Optional[Any] = num_attention_heads
lowerCAmelCase__ : List[str] = intermediate_size
lowerCAmelCase__ : int = hidden_act
lowerCAmelCase__ : Any = hidden_dropout_prob
lowerCAmelCase__ : List[Any] = attention_probs_dropout_prob
lowerCAmelCase__ : Dict = max_position_embeddings
lowerCAmelCase__ : List[Any] = type_vocab_size
lowerCAmelCase__ : Tuple = type_sequence_label_size
lowerCAmelCase__ : Tuple = initializer_range
lowerCAmelCase__ : List[Any] = num_choices
lowerCAmelCase__ : Dict = rescale_embeddings
lowerCAmelCase__ : Tuple = attention_type
lowerCAmelCase__ : Any = use_bias
lowerCAmelCase__ : Tuple = block_size
lowerCAmelCase__ : Dict = num_random_blocks
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : Union[str, Any] = None
if self.use_attention_mask:
lowerCAmelCase__ : int = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : List[Any] = None
if self.use_token_type_ids:
lowerCAmelCase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ : Any = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.prepare_config_and_inputs()
lowerCAmelCase__ : Tuple = config_and_inputs
lowerCAmelCase__ : Dict = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'attention_mask': attention_mask,
}
return config, inputs_dict
@require_flax
class A__ ( FlaxModelTesterMixin , unittest.TestCase ):
lowercase = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
lowercase = False
lowercase = False
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _lowerCamelCase ( self : int ):
'''simple docstring'''
super().test_hidden_states_output()
@slow
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowerCAmelCase__ : List[str] = model_class_name.from_pretrained('google/bigbird-roberta-base' )
self.assertIsNotNone(a )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase__ : Dict = self._prepare_for_class(a , a )
lowerCAmelCase__ : Optional[int] = model_class(a )
@jax.jit
def model_jitted(a : List[str] , a : Optional[Any]=None , **a : int ):
return model(input_ids=a , attention_mask=a , **a )
with self.subTest('JIT Enabled' ):
lowerCAmelCase__ : Optional[int] = model_jitted(**a ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
lowerCAmelCase__ : str = model_jitted(**a ).to_tuple()
self.assertEqual(len(a ) , len(a ) )
for jitted_output, output in zip(a , a ):
self.assertEqual(jitted_output.shape , output.shape )
def _lowerCamelCase ( self : List[Any] , a : Optional[int] , a : Optional[Any] , a : Union[str, Any] , a : Optional[int]=1E-5 , a : List[Any]="outputs" , a : str=None ):
'''simple docstring'''
if name.startswith('outputs.attentions' ):
return
else:
super().check_pt_flax_outputs(a , a , a , a , a , a )
| 362
|
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class A__ ( unittest.TestCase ):
@parameterized.expand([(None,), ('foo.json',)] )
def _lowerCamelCase ( self : Dict , a : str ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a , config_name=a )
lowerCAmelCase__ : Tuple = GenerationConfig.from_pretrained(a , config_name=a )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , a )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Dict = AutoConfig.from_pretrained('gpt2' )
lowerCAmelCase__ : Any = GenerationConfig.from_model_config(a )
lowerCAmelCase__ : Any = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(a , a )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = GenerationConfig()
lowerCAmelCase__ : Dict = {
'max_new_tokens': 1_024,
'foo': 'bar',
}
lowerCAmelCase__ : List[Any] = copy.deepcopy(a )
lowerCAmelCase__ : Dict = generation_config.update(**a )
# update_kwargs was not modified (no side effects)
self.assertEqual(a , a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(a , {'foo': 'bar'} )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = GenerationConfig()
lowerCAmelCase__ : List[Any] = 'bar'
with tempfile.TemporaryDirectory('test-generation-config' ) as tmp_dir:
generation_config.save_pretrained(a )
lowerCAmelCase__ : List[Any] = GenerationConfig.from_pretrained(a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , 'bar' )
lowerCAmelCase__ : int = GenerationConfig.from_model_config(a )
assert not hasattr(a , 'foo' ) # no new kwargs should be initialized if from config
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , a )
self.assertEqual(default_config.num_beams , 1 )
lowerCAmelCase__ : List[Any] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , a )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a )
lowerCAmelCase__ : Any = GenerationConfig.from_pretrained(a , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , a )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class A__ ( unittest.TestCase ):
@classmethod
def _lowerCamelCase ( cls : int ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = TOKEN
HfFolder.save_token(a )
@classmethod
def _lowerCamelCase ( cls : Optional[int] ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='test-generation-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-generation-config-org' )
except HTTPError:
pass
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('test-generation-config' , use_auth_token=self._token )
lowerCAmelCase__ : Any = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-generation-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
a , repo_id='test-generation-config' , push_to_hub=a , use_auth_token=self._token )
lowerCAmelCase__ : Tuple = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('valid_org/test-generation-config-org' , use_auth_token=self._token )
lowerCAmelCase__ : Dict = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-generation-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
a , repo_id='valid_org/test-generation-config-org' , push_to_hub=a , use_auth_token=self._token )
lowerCAmelCase__ : List[str] = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
| 307
| 0
|
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use GLPNImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
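# Migration sketch: the shim above only warns and forwards, so callers can
# switch to the processor class directly with the same arguments:
#
#   from transformers import GLPNImageProcessor
#   image_processor = GLPNImageProcessor()  # drop-in replacement for GLPNFeatureExtractor()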
| 363
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( PipelineTesterMixin , unittest.TestCase ):
lowercase = UnCLIPImageVariationPipeline
lowercase = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
lowercase = IMAGE_VARIATION_BATCH_PARAMS
lowercase = [
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
lowercase = False
@property
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
return self.time_input_dim
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return 100
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(a )
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : List[Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(a )
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Union[str, Any] = {
'clip_embeddings_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'cross_attention_dim': self.cross_attention_dim,
}
lowerCAmelCase__ : Optional[Any] = UnCLIPTextProjModel(**a )
return model
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : str = {
'sample_size': 32,
# RGB in channels
'in_channels': 3,
            # Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 6,
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': 'identity',
}
lowerCAmelCase__ : str = UNetaDConditionModel(**a )
return model
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Any = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
torch.manual_seed(1 )
lowerCAmelCase__ : List[str] = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.dummy_decoder
lowerCAmelCase__ : Optional[int] = self.dummy_text_proj
lowerCAmelCase__ : Any = self.dummy_text_encoder
lowerCAmelCase__ : Any = self.dummy_tokenizer
lowerCAmelCase__ : Any = self.dummy_super_res_first
lowerCAmelCase__ : Optional[int] = self.dummy_super_res_last
lowerCAmelCase__ : Dict = UnCLIPScheduler(
variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
lowerCAmelCase__ : Any = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
lowerCAmelCase__ : Any = CLIPImageProcessor(crop_size=32 , size=32 )
lowerCAmelCase__ : Optional[int] = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def _lowerCamelCase ( self : Any , a : Dict , a : List[str]=0 , a : List[str]=True ):
'''simple docstring'''
lowerCAmelCase__ : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(a ) ).to(a )
if str(a ).startswith('mps' ):
lowerCAmelCase__ : Optional[int] = torch.manual_seed(a )
else:
lowerCAmelCase__ : str = torch.Generator(device=a ).manual_seed(a )
if pil_image:
lowerCAmelCase__ : Optional[int] = input_image * 0.5 + 0.5
lowerCAmelCase__ : Dict = input_image.clamp(0 , 1 )
lowerCAmelCase__ : List[Any] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowerCAmelCase__ : Union[str, Any] = DiffusionPipeline.numpy_to_pil(a )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = 'cpu'
lowerCAmelCase__ : Any = self.get_dummy_components()
lowerCAmelCase__ : List[str] = self.pipeline_class(**a )
lowerCAmelCase__ : Dict = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Dict = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : str = pipe(**a )
lowerCAmelCase__ : Optional[Any] = output.images
lowerCAmelCase__ : str = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Optional[int] = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : List[str] = np.array(
[
0.9_9_9_7,
0.0_0_0_2,
0.9_9_9_7,
0.9_9_9_7,
0.9_9_6_9,
0.0_0_2_3,
0.9_9_9_7,
0.9_9_6_9,
0.9_9_7_0,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = 'cpu'
lowerCAmelCase__ : Dict = self.get_dummy_components()
lowerCAmelCase__ : Optional[int] = self.pipeline_class(**a )
lowerCAmelCase__ : int = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : List[str] = pipe(**a )
lowerCAmelCase__ : Union[str, Any] = output.images
lowerCAmelCase__ : int = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : int = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : str = np.array([0.9_9_9_7, 0.0_0_0_3, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_7_0, 0.0_0_2_4, 0.9_9_9_7, 0.9_9_7_1, 0.9_9_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = 'cpu'
lowerCAmelCase__ : int = self.get_dummy_components()
lowerCAmelCase__ : Tuple = self.pipeline_class(**a )
lowerCAmelCase__ : Union[str, Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Tuple = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : List[str] = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
lowerCAmelCase__ : Optional[int] = pipe(**a )
lowerCAmelCase__ : Tuple = output.images
lowerCAmelCase__ : List[str] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Union[str, Any] = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
lowerCAmelCase__ : str = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
lowerCAmelCase__ : Union[str, Any] = np.array(
[
0.9_9_9_7,
0.9_9_8_9,
0.0_0_0_8,
0.0_0_2_1,
0.9_9_6_0,
0.0_0_1_8,
0.0_0_1_4,
0.0_0_0_2,
0.9_9_3_3,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch.device('cpu' )
        class DummyScheduler:
            init_noise_sigma = 1
lowerCAmelCase__ : Optional[Any] = self.get_dummy_components()
lowerCAmelCase__ : Dict = self.pipeline_class(**a )
lowerCAmelCase__ : Optional[Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Optional[int] = torch.Generator(device=a ).manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe.decoder.dtype
lowerCAmelCase__ : Union[str, Any] = 1
lowerCAmelCase__ : str = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
lowerCAmelCase__ : List[Any] = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
lowerCAmelCase__ : List[str] = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
lowerCAmelCase__ : Any = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Optional[int] = pipe(
**a , decoder_latents=a , super_res_latents=a ).images
lowerCAmelCase__ : Optional[Any] = self.get_dummy_inputs(a , pil_image=a )
# Don't pass image, instead pass embedding
lowerCAmelCase__ : Union[str, Any] = pipeline_inputs.pop('image' )
lowerCAmelCase__ : Union[str, Any] = pipe.image_encoder(a ).image_embeds
lowerCAmelCase__ : List[Any] = pipe(
**a , decoder_latents=a , super_res_latents=a , image_embeddings=a , ).images
        # make sure passing image embeddings manually gives an identical result
assert np.abs(img_out_a - img_out_a ).max() < 1E-4
@skip_mps
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch_device == 'cpu'
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
lowerCAmelCase__ : int = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=a , expected_max_diff=a )
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = torch_device == 'cpu'
lowerCAmelCase__ : Any = True
lowerCAmelCase__ : Optional[Any] = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=a , relax_max_difference=a , additional_params_copy_to_batched_inputs=a , )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
lowerCAmelCase__ : List[str] = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=a , additional_params_copy_to_batched_inputs=a , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=a )
@skip_mps
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' )
lowerCAmelCase__ : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/unclip/karlo_v1_alpha_cat_variation_fp16.npy' )
lowerCAmelCase__ : Tuple = UnCLIPImageVariationPipeline.from_pretrained(
'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.floataa )
lowerCAmelCase__ : Union[str, Any] = pipeline.to(a )
pipeline.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Dict = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase__ : List[str] = pipeline(
a , generator=a , output_type='np' , )
lowerCAmelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(a , a , 15 )
| 307
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
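# Effect of the lazy module (illustrative): importing the package stays cheap, and
# `from transformers.models.longt5 import LongT5Model` only triggers the torch
# import on first attribute access through `_LazyModule`.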
| 364
|
def stooge_sort(arr: list) -> list:
    stooge(arr, 0, len(arr) - 1)
    return arr
def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, (h - t))
        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, (h))
        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, (h - t))
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
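# Worked example: stooge_sort([2, 4, 5, 3, 1]) returns [1, 2, 3, 4, 5].
# Stooge sort runs in O(n^(log 3 / log 1.5)) ≈ O(n^2.71), so it is shown for
# illustration rather than production use.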
| 307
| 0
|
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
lowerCamelCase__ = get_tests_dir("""fixtures/dummy-config.json""")
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Tuple ):
lowerCAmelCase__ : int = 0
def _lowerCamelCase ( self : Any ):
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec('transformers.models.auto' ) )
def _lowerCamelCase ( self : Optional[Any] ):
lowerCAmelCase__ : Optional[int] = AutoConfig.from_pretrained('bert-base-uncased' )
self.assertIsInstance(a , a )
def _lowerCamelCase ( self : List[str] ):
lowerCAmelCase__ : Optional[int] = AutoConfig.from_pretrained(a )
self.assertIsInstance(a , a )
def _lowerCamelCase ( self : Dict ):
lowerCAmelCase__ : List[Any] = AutoConfig.from_pretrained(a )
self.assertIsInstance(a , a )
def _lowerCamelCase ( self : Any ):
lowerCAmelCase__ : str = AutoConfig.for_model('roberta' )
self.assertIsInstance(a , a )
def _lowerCamelCase ( self : Dict ):
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
lowerCAmelCase__ : Any = os.path.join(a , 'fake-roberta' )
os.makedirs(a , exist_ok=a )
with open(os.path.join(a , 'config.json' ) , 'w' ) as f:
f.write(json.dumps({} ) )
lowerCAmelCase__ : Union[str, Any] = AutoConfig.from_pretrained(a )
self.assertEqual(type(a ) , a )
def _lowerCamelCase ( self : Optional[int] ):
try:
AutoConfig.register('custom' , a )
# Wrong model type will raise an error
with self.assertRaises(a ):
AutoConfig.register('model' , a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a ):
AutoConfig.register('bert' , a )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCAmelCase__ : Union[str, Any] = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a )
lowerCAmelCase__ : Dict = AutoConfig.from_pretrained(a )
self.assertIsInstance(a , a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def _lowerCamelCase ( self : Optional[int] ):
with self.assertRaisesRegex(
a , 'bert-base is not a local folder and is not a valid model identifier' ):
lowerCAmelCase__ : Any = AutoConfig.from_pretrained('bert-base' )
def _lowerCamelCase ( self : Optional[int] ):
with self.assertRaisesRegex(
a , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
lowerCAmelCase__ : Optional[Any] = AutoConfig.from_pretrained(a , revision='aaaaaa' )
def _lowerCamelCase ( self : List[Any] ):
with self.assertRaisesRegex(
a , 'hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.' , ):
lowerCAmelCase__ : List[str] = AutoConfig.from_pretrained('hf-internal-testing/no-config-test-repo' )
def _lowerCamelCase ( self : Any ):
with self.assertRaises(a ):
lowerCAmelCase__ : int = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a ):
lowerCAmelCase__ : List[str] = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=a )
lowerCAmelCase__ : Optional[Any] = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=a )
self.assertEqual(config.__class__.__name__ , 'NewModelConfig' )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a )
lowerCAmelCase__ : str = AutoConfig.from_pretrained(a , trust_remote_code=a )
self.assertEqual(reloaded_config.__class__.__name__ , 'NewModelConfig' )
def _lowerCamelCase ( self : Optional[Any] ):
        class NewModelConfigLocal ( BertConfig ):
lowercase = 'new-model'
try:
AutoConfig.register('new-model' , a )
# If remote code is not set, the default is to use local
lowerCAmelCase__ : Dict = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' )
self.assertEqual(config.__class__.__name__ , 'NewModelConfigLocal' )
# If remote code is disabled, we load the local one.
lowerCAmelCase__ : Optional[Any] = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=a )
self.assertEqual(config.__class__.__name__ , 'NewModelConfigLocal' )
# If remote is enabled, we load from the Hub
lowerCAmelCase__ : Dict = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=a )
self.assertEqual(config.__class__.__name__ , 'NewModelConfig' )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 365
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
"""processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
"""TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSpeech2TextForConditionalGeneration""",
"""TFSpeech2TextModel""",
"""TFSpeech2TextPreTrainedModel""",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
"""SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Speech2TextForConditionalGeneration""",
"""Speech2TextModel""",
"""Speech2TextPreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 307
| 0
|
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{two_pointer([2, 7, 11, 15], 9) = }""")
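# Note: the two-pointer scan assumes `nums` is sorted in ascending order;
# for [2, 7, 11, 15] with target 9 it returns [0, 1] because 2 + 7 == 9.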
| 366
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = {
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
}
}
lowerCAmelCase__ : int = {
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 128,
'task_specific_params.summarization.min_length': 12,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 142,
'task_specific_params.summarization_cnn.min_length': 56,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 62,
'task_specific_params.summarization_xsum.min_length': 11,
'task_specific_params.summarization_xsum.num_beams': 6,
}
self.assertEqual(flatten_dict(a ) , a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(a ) , x.transpose() ) )
lowerCAmelCase__ : List[str] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 )
lowerCAmelCase__ : List[Any] = torch.tensor(a )
self.assertTrue(np.allclose(transpose(a ) , transpose(a ).numpy() ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : int = torch.tensor(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , transpose(a , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(transpose(a ) , transpose(a ).numpy() ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : Dict = tf.constant(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , transpose(a , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = np.random.randn(3 , 4 )
lowerCAmelCase__ : int = jnp.array(a )
self.assertTrue(np.allclose(transpose(a ) , np.asarray(transpose(a ) ) ) )
lowerCAmelCase__ : Any = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : str = jnp.array(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , np.asarray(transpose(a , axes=(1, 2, 0) ) ) ) )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , np.reshape(a , (4, 3) ) ) )
lowerCAmelCase__ : Tuple = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , np.reshape(a , (12, 5) ) ) )
@require_torch
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = np.random.randn(3 , 4 )
lowerCAmelCase__ : Dict = torch.tensor(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , reshape(a , (4, 3) ).numpy() ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : str = torch.tensor(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , reshape(a , (12, 5) ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
lowerCAmelCase__ : List[Any] = tf.constant(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , reshape(a , (4, 3) ).numpy() ) )
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , reshape(a , (12, 5) ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
lowerCAmelCase__ : List[str] = jnp.array(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , np.asarray(reshape(a , (4, 3) ) ) ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : Union[str, Any] = jnp.array(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , np.asarray(reshape(a , (12, 5) ) ) ) )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(a ) , np.squeeze(a ) ) )
lowerCAmelCase__ : int = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , np.squeeze(a , axis=2 ) ) )
@require_torch
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase__ : str = torch.tensor(a )
self.assertTrue(np.allclose(squeeze(a ) , squeeze(a ).numpy() ) )
lowerCAmelCase__ : Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase__ : Dict = torch.tensor(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , squeeze(a , axis=2 ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(squeeze(a ) , squeeze(a ).numpy() ) )
lowerCAmelCase__ : int = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase__ : str = tf.constant(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , squeeze(a , axis=2 ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase__ : Union[str, Any] = jnp.array(a )
self.assertTrue(np.allclose(squeeze(a ) , np.asarray(squeeze(a ) ) ) )
lowerCAmelCase__ : str = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase__ : Optional[Any] = jnp.array(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , np.asarray(squeeze(a , axis=2 ) ) ) )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , np.expand_dims(a , axis=1 ) ) )
@require_torch
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : str = np.random.randn(3 , 4 )
lowerCAmelCase__ : str = torch.tensor(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , expand_dims(a , axis=1 ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = np.random.randn(3 , 4 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , expand_dims(a , axis=1 ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = np.random.randn(3 , 4 )
lowerCAmelCase__ : Tuple = jnp.array(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , np.asarray(expand_dims(a , axis=1 ) ) ) )
| 307
| 0
|
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class A__ :
@staticmethod
def _lowerCamelCase ( *a : Any , **a : Any ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class A__ ( unittest.TestCase ):
lowercase = MODEL_FOR_OBJECT_DETECTION_MAPPING
def _lowerCamelCase ( self : Tuple , a : Union[str, Any] , a : Optional[Any] , a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = ObjectDetectionPipeline(model=a , image_processor=a )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def _lowerCamelCase ( self : Union[str, Any] , a : Optional[int] , a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png' , threshold=0.0 )
self.assertGreater(len(a ) , 0 )
for detected_object in outputs:
self.assertEqual(
a , {
'score': ANY(a ),
'label': ANY(a ),
'box': {'xmin': ANY(a ), 'ymin': ANY(a ), 'xmax': ANY(a ), 'ymax': ANY(a )},
} , )
import datasets
lowerCAmelCase__ : Tuple = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
lowerCAmelCase__ : List[str] = [
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
]
lowerCAmelCase__ : Optional[Any] = object_detector(a , threshold=0.0 )
self.assertEqual(len(a ) , len(a ) )
for outputs in batch_outputs:
self.assertGreater(len(a ) , 0 )
for detected_object in outputs:
self.assertEqual(
a , {
'score': ANY(a ),
'label': ANY(a ),
'box': {'xmin': ANY(a ), 'ymin': ANY(a ), 'xmax': ANY(a ), 'ymax': ANY(a )},
} , )
@require_tf
@unittest.skip('Object detection not implemented in TF' )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
pass
@require_torch
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = 'hf-internal-testing/tiny-detr-mobilenetsv3'
lowerCAmelCase__ : List[str] = AutoModelForObjectDetection.from_pretrained(a )
lowerCAmelCase__ : Dict = AutoFeatureExtractor.from_pretrained(a )
lowerCAmelCase__ : Union[str, Any] = ObjectDetectionPipeline(model=a , feature_extractor=a )
lowerCAmelCase__ : Any = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=0.0 )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
] , )
lowerCAmelCase__ : List[str] = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
] , )
@require_torch
@slow
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Any = 'facebook/detr-resnet-50'
lowerCAmelCase__ : Optional[int] = AutoModelForObjectDetection.from_pretrained(a )
lowerCAmelCase__ : Tuple = AutoFeatureExtractor.from_pretrained(a )
lowerCAmelCase__ : int = ObjectDetectionPipeline(model=a , feature_extractor=a )
lowerCAmelCase__ : Union[str, Any] = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] , )
lowerCAmelCase__ : str = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
] , )
@require_torch
@slow
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = 'facebook/detr-resnet-50'
lowerCAmelCase__ : Union[str, Any] = pipeline('object-detection' , model=a )
lowerCAmelCase__ : Union[str, Any] = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] , )
lowerCAmelCase__ : int = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
] , )
@require_torch
@slow
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Any = 0.9_9_8_5
lowerCAmelCase__ : List[str] = 'facebook/detr-resnet-50'
lowerCAmelCase__ : Dict = pipeline('object-detection' , model=a )
lowerCAmelCase__ : Union[str, Any] = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=a )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] , )
@require_torch
@require_pytesseract
@slow
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = 'Narsil/layoutlmv3-finetuned-funsd'
lowerCAmelCase__ : Tuple = 0.9_9_9_3
lowerCAmelCase__ : Optional[int] = pipeline('object-detection' , model=a , threshold=a )
lowerCAmelCase__ : Optional[Any] = object_detector(
'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' )
self.assertEqual(
nested_simplify(a , decimals=4 ) , [
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
] , )
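# A minimal usage sketch of the pipeline exercised above (hedged: the model
# id comes from the slow tests; the 0.9 threshold is an illustrative default):
#
#     from transformers import pipeline
#     detector = pipeline('object-detection', model='facebook/detr-resnet-50')
#     for d in detector('http://images.cocodataset.org/val2017/000000039769.jpg', threshold=0.9):
#         print(d['label'], round(d['score'], 4), d['box'])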
| 367
|
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
lowerCamelCase__ = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
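# A minimal usage sketch of the public API re-exported above (hedged: the
# dataset name 'squad' is an illustrative assumption):
#
#     from datasets import load_dataset
#     ds = load_dataset('squad', split='train')
#     print(ds[0]['question'])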
| 307
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = UnCLIPImageVariationPipeline
lowercase = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
lowercase = IMAGE_VARIATION_BATCH_PARAMS
lowercase = [
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
lowercase = False
@property
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
return self.time_input_dim
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return 100
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(a )
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : List[Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(a )
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Union[str, Any] = {
'clip_embeddings_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'cross_attention_dim': self.cross_attention_dim,
}
lowerCAmelCase__ : Optional[Any] = UnCLIPTextProjModel(**a )
return model
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : str = {
'sample_size': 32,
# RGB in channels
'in_channels': 3,
# Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 6,
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': 'identity',
}
lowerCAmelCase__ : str = UNetaDConditionModel(**a )
return model
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Any = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
torch.manual_seed(1 )
lowerCAmelCase__ : List[str] = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.dummy_decoder
lowerCAmelCase__ : Optional[int] = self.dummy_text_proj
lowerCAmelCase__ : Any = self.dummy_text_encoder
lowerCAmelCase__ : Any = self.dummy_tokenizer
lowerCAmelCase__ : Any = self.dummy_super_res_first
lowerCAmelCase__ : Optional[int] = self.dummy_super_res_last
lowerCAmelCase__ : Dict = UnCLIPScheduler(
variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
lowerCAmelCase__ : Any = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
lowerCAmelCase__ : Any = CLIPImageProcessor(crop_size=32 , size=32 )
lowerCAmelCase__ : Optional[int] = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def _lowerCamelCase ( self : Any , a : Dict , a : List[str]=0 , a : List[str]=True ):
'''simple docstring'''
lowerCAmelCase__ : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(a ) ).to(a )
if str(a ).startswith('mps' ):
lowerCAmelCase__ : Optional[int] = torch.manual_seed(a )
else:
lowerCAmelCase__ : str = torch.Generator(device=a ).manual_seed(a )
if pil_image:
lowerCAmelCase__ : Optional[int] = input_image * 0.5 + 0.5
lowerCAmelCase__ : Dict = input_image.clamp(0 , 1 )
lowerCAmelCase__ : List[Any] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowerCAmelCase__ : Union[str, Any] = DiffusionPipeline.numpy_to_pil(a )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = 'cpu'
lowerCAmelCase__ : Any = self.get_dummy_components()
lowerCAmelCase__ : List[str] = self.pipeline_class(**a )
lowerCAmelCase__ : Dict = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Dict = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : str = pipe(**a )
lowerCAmelCase__ : Optional[Any] = output.images
lowerCAmelCase__ : str = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Optional[int] = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : List[str] = np.array(
[
0.9_9_9_7,
0.0_0_0_2,
0.9_9_9_7,
0.9_9_9_7,
0.9_9_6_9,
0.0_0_2_3,
0.9_9_9_7,
0.9_9_6_9,
0.9_9_7_0,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = 'cpu'
lowerCAmelCase__ : Dict = self.get_dummy_components()
lowerCAmelCase__ : Optional[int] = self.pipeline_class(**a )
lowerCAmelCase__ : int = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : List[str] = pipe(**a )
lowerCAmelCase__ : Union[str, Any] = output.images
lowerCAmelCase__ : int = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : int = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : str = np.array([0.9_9_9_7, 0.0_0_0_3, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_7_0, 0.0_0_2_4, 0.9_9_9_7, 0.9_9_7_1, 0.9_9_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = 'cpu'
lowerCAmelCase__ : int = self.get_dummy_components()
lowerCAmelCase__ : Tuple = self.pipeline_class(**a )
lowerCAmelCase__ : Union[str, Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Tuple = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : List[str] = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
lowerCAmelCase__ : Optional[int] = pipe(**a )
lowerCAmelCase__ : Tuple = output.images
lowerCAmelCase__ : List[str] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Union[str, Any] = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
lowerCAmelCase__ : str = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
lowerCAmelCase__ : Union[str, Any] = np.array(
[
0.9_9_9_7,
0.9_9_8_9,
0.0_0_0_8,
0.0_0_2_1,
0.9_9_6_0,
0.0_0_1_8,
0.0_0_1_4,
0.0_0_0_2,
0.9_9_3_3,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch.device('cpu' )
class DummyScheduler :
init_noise_sigma = 1
lowerCAmelCase__ : Optional[Any] = self.get_dummy_components()
lowerCAmelCase__ : Dict = self.pipeline_class(**a )
lowerCAmelCase__ : Optional[Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Optional[int] = torch.Generator(device=a ).manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe.decoder.dtype
lowerCAmelCase__ : Union[str, Any] = 1
lowerCAmelCase__ : str = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
lowerCAmelCase__ : List[Any] = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
lowerCAmelCase__ : List[str] = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
lowerCAmelCase__ : Any = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Optional[int] = pipe(
**a , decoder_latents=a , super_res_latents=a ).images
lowerCAmelCase__ : Optional[Any] = self.get_dummy_inputs(a , pil_image=a )
# Don't pass image, instead pass embedding
lowerCAmelCase__ : Union[str, Any] = pipeline_inputs.pop('image' )
lowerCAmelCase__ : Union[str, Any] = pipe.image_encoder(a ).image_embeds
lowerCAmelCase__ : List[Any] = pipe(
**a , decoder_latents=a , super_res_latents=a , image_embeddings=a , ).images
# make sure passing image embeddings manually gives identical results
assert np.abs(img_out_a - img_out_a ).max() < 1E-4
@skip_mps
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch_device == 'cpu'
# The check is relaxed because there is no torch 2.0 sliced-attention added-KV processor
lowerCAmelCase__ : int = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=a , expected_max_diff=a )
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = torch_device == 'cpu'
lowerCAmelCase__ : Any = True
lowerCAmelCase__ : Optional[Any] = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=a , relax_max_difference=a , additional_params_copy_to_batched_inputs=a , )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
lowerCAmelCase__ : List[str] = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=a , additional_params_copy_to_batched_inputs=a , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=a )
@skip_mps
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' )
lowerCAmelCase__ : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/unclip/karlo_v1_alpha_cat_variation_fp16.npy' )
lowerCAmelCase__ : Tuple = UnCLIPImageVariationPipeline.from_pretrained(
'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.floataa )
lowerCAmelCase__ : Union[str, Any] = pipeline.to(a )
pipeline.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Dict = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase__ : List[str] = pipeline(
a , generator=a , output_type='np' , )
lowerCAmelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(a , a , 15 )
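# A minimal usage sketch mirroring the slow test above (hedged: the fp16 dtype
# and checkpoint id are taken from that test; everything else is default):
#
#     import torch
#     from diffusers import UnCLIPImageVariationPipeline
#     from diffusers.utils import load_image
#     pipe = UnCLIPImageVariationPipeline.from_pretrained(
#         'kakaobrain/karlo-v1-alpha-image-variations', torch_dtype=torch.float16).to('cuda')
#     image = load_image('https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png')
#     variation = pipe(image, generator=torch.Generator('cpu').manual_seed(0), output_type='np').images[0]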
| 368
|
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = DanceDiffusionPipeline
lowercase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
lowercase = PipelineTesterMixin.required_optional_params - {
'callback',
'latents',
'callback_steps',
'output_type',
'num_images_per_prompt',
}
lowercase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
lowercase = False
lowercase = False
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16_000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=a , use_timestep_embedding=a , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , )
lowerCAmelCase__ : Tuple = IPNDMScheduler()
lowerCAmelCase__ : str = {
'unet': unet,
'scheduler': scheduler,
}
return components
def _lowerCamelCase ( self : int , a : Dict , a : List[str]=0 ):
'''simple docstring'''
if str(a ).startswith('mps' ):
lowerCAmelCase__ : Union[str, Any] = torch.manual_seed(a )
else:
lowerCAmelCase__ : Optional[Any] = torch.Generator(device=a ).manual_seed(a )
lowerCAmelCase__ : Optional[Any] = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : int = self.get_dummy_components()
lowerCAmelCase__ : List[str] = DanceDiffusionPipeline(**a )
lowerCAmelCase__ : Any = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : List[str] = self.get_dummy_inputs(a )
lowerCAmelCase__ : List[Any] = pipe(**a )
lowerCAmelCase__ : List[str] = output.audios
lowerCAmelCase__ : Optional[Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
lowerCAmelCase__ : List[Any] = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch_device
lowerCAmelCase__ : List[str] = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
lowerCAmelCase__ : List[str] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Optional[int] = torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe(generator=a , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
lowerCAmelCase__ : int = output.audios
lowerCAmelCase__ : List[Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCAmelCase__ : Dict = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = torch_device
lowerCAmelCase__ : List[Any] = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.floataa )
lowerCAmelCase__ : Optional[int] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : str = torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe(generator=a , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
lowerCAmelCase__ : str = output.audios
lowerCAmelCase__ : Tuple = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCAmelCase__ : int = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
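# A minimal usage sketch of unconditional audio generation mirroring the slow
# tests above (hedged: the checkpoint, step count and clip length are theirs):
#
#     import torch
#     from diffusers import DanceDiffusionPipeline
#     pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k').to('cuda')
#     audio = pipe(generator=torch.manual_seed(0), num_inference_steps=100,
#                  audio_length_in_s=4.096).audios[0]   # (2, num_samples) stereo array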
| 307
| 0
|
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
def __init__( self : Any , a : Optional[Any] , a : int=13 , a : List[Any]=32 , a : Optional[int]=3 , a : List[str]=4 , a : List[Any]=[10, 20, 30, 40] , a : Optional[int]=[2, 2, 3, 2] , a : Any=True , a : Tuple=True , a : int=37 , a : List[str]="gelu" , a : Dict=10 , a : Optional[int]=0.0_2 , a : List[Any]=["stage2", "stage3", "stage4"] , a : Union[str, Any]=3 , a : Tuple=None , ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = parent
lowerCAmelCase__ : Any = batch_size
lowerCAmelCase__ : Tuple = image_size
lowerCAmelCase__ : Optional[int] = num_channels
lowerCAmelCase__ : Any = num_stages
lowerCAmelCase__ : str = hidden_sizes
lowerCAmelCase__ : Dict = depths
lowerCAmelCase__ : int = is_training
lowerCAmelCase__ : str = use_labels
lowerCAmelCase__ : Dict = intermediate_size
lowerCAmelCase__ : List[str] = hidden_act
lowerCAmelCase__ : Optional[Any] = type_sequence_label_size
lowerCAmelCase__ : str = initializer_range
lowerCAmelCase__ : str = out_features
lowerCAmelCase__ : List[Any] = num_labels
lowerCAmelCase__ : Optional[int] = scope
lowerCAmelCase__ : Any = num_stages
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ : List[str] = None
if self.use_labels:
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : int = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=a , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=a , loss_ignore_index=255 , num_labels=self.num_labels , )
def _lowerCamelCase ( self : Tuple , a : Tuple , a : Any , a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = UperNetForSemanticSegmentation(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Dict = model(a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = config_and_inputs
lowerCAmelCase__ : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
lowercase = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = UperNetModelTester(self )
lowerCAmelCase__ : Optional[Any] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=37 )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : Tuple = model_class(a )
lowerCAmelCase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ : Optional[int] = [*signature.parameters.keys()]
lowerCAmelCase__ : Tuple = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a )
@unittest.skip(reason='UperNet does not use inputs_embeds' )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip(reason='UperNet does not support input and output embeddings' )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip(reason='UperNet does not have a base model' )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip(reason='UperNet does not have a base model' )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
def check_hidden_states_output(a : str , a : Optional[Any] , a : Optional[Any] ):
lowerCAmelCase__ : Optional[int] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : int = model(**self._prepare_for_class(a , a ) )
lowerCAmelCase__ : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase__ : Tuple = self.model_tester.num_stages
self.assertEqual(len(a ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : Optional[int] = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ : List[str] = True
check_hidden_states_output(a , a , a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Union[str, Any] = _config_zero_init(a )
lowerCAmelCase__ : Union[str, Any] = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
lowerCAmelCase__ : int = model_class(config=a )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip(reason='UperNet does not have tied weights' )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
pass
@slow
def _lowerCamelCase ( self : int ):
'''simple docstring'''
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : int = UperNetForSemanticSegmentation.from_pretrained(a )
self.assertIsNotNone(a )
def lowerCAmelCase__ ( ) -> Optional[Any]:
lowerCAmelCase__ : Optional[int] = hf_hub_download(
repo_id='hf-internal-testing/fixtures_ade20k' , repo_type='dataset' , filename='ADE_val_00000001.jpg' )
lowerCAmelCase__ : int = Image.open(SCREAMING_SNAKE_CASE_ ).convert('RGB' )
return image
@require_torch
@require_vision
@slow
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = AutoImageProcessor.from_pretrained('openmmlab/upernet-swin-tiny' )
lowerCAmelCase__ : Union[str, Any] = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-swin-tiny' ).to(a )
lowerCAmelCase__ : int = prepare_img()
lowerCAmelCase__ : Optional[int] = processor(images=a , return_tensors='pt' ).to(a )
with torch.no_grad():
lowerCAmelCase__ : str = model(**a )
lowerCAmelCase__ : Optional[Any] = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , a )
lowerCAmelCase__ : Tuple = torch.tensor(
[[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , a , atol=1E-4 ) )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : int = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny' )
lowerCAmelCase__ : int = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny' ).to(a )
lowerCAmelCase__ : str = prepare_img()
lowerCAmelCase__ : Dict = processor(images=a , return_tensors='pt' ).to(a )
with torch.no_grad():
lowerCAmelCase__ : str = model(**a )
lowerCAmelCase__ : Union[str, Any] = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , a )
lowerCAmelCase__ : Any = torch.tensor(
[[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , a , atol=1E-4 ) )
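# A minimal usage sketch of semantic segmentation with the checkpoints tested
# above (hedged: `image` is any PIL image; the argmax post-processing step is
# an assumption, not part of the tests):
#
#     from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
#     processor = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny')
#     model = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny')
#     inputs = processor(images=image, return_tensors='pt')
#     logits = model(**inputs).logits        # (1, num_labels, 512, 512)
#     seg_map = logits.argmax(dim=1)[0]      # per-pixel class ids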
| 369
|
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : Any , *a : Any , **a : Any ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Union[str, Any] , *a : Optional[int] , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : int , *a : List[Any] , **a : int ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : str , *a : Any , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : List[str] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , *a : Optional[Any] , **a : Any ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[int] , *a : List[Any] , **a : str ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : List[Any] , *a : List[str] , **a : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , *a : Union[str, Any] , **a : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *a : Dict , **a : List[str] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : Dict , **a : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : List[str] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : Dict , *a : str , **a : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Any , *a : Any , **a : Any ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Any , *a : List[Any] , **a : str ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : str , *a : Union[str, Any] , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : int , *a : Union[str, Any] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : Tuple , **a : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
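# These placeholders implement the lazy-dependency pattern: importing them
# always succeeds, but instantiating or calling a classmethod raises an error
# naming the missing backends. A minimal sketch of the idea (hedged:
# simplified, not the library's exact helper; `_is_available` is a
# hypothetical stand-in):
#
#     def requires_backends(obj, backends):
#         missing = [b for b in backends if not _is_available(b)]
#         if missing:
#             raise ImportError(f'{obj} requires the missing backends: {missing}')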
| 307
| 0
|
def lowerCAmelCase__ ( files ) -> float:
    """Return the minimum total cost of merging all the given file sizes (optimal merge pattern)."""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider the two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
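# Worked example (file sizes are illustrative): merging [2, 3, 4] first
# combines 2 and 3 at cost 5, then 5 and 4 at cost 9, for a total of 14:
#
#     assert lowerCAmelCase__([2, 3, 4]) == 14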
| 370
|
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class A__ :
def __init__( self : List[str] , a : Any , a : Dict=13 , a : Optional[Any]=7 , a : Tuple=True , a : Tuple=True , a : Dict=False , a : Optional[Any]=True , a : Dict=99 , a : Tuple=32 , a : Optional[Any]=5 , a : str=4 , a : Union[str, Any]=37 , a : Any="gelu" , a : Dict=0.1 , a : Any=0.1 , a : Optional[int]=512 , a : Union[str, Any]=16 , a : Optional[int]=2 , a : Optional[Any]=0.0_2 , a : List[Any]=3 , a : Any=4 , a : Optional[int]=None , ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = parent
lowerCAmelCase__ : str = batch_size
lowerCAmelCase__ : Optional[int] = seq_length
lowerCAmelCase__ : Optional[Any] = is_training
lowerCAmelCase__ : Tuple = use_input_mask
lowerCAmelCase__ : List[Any] = use_token_type_ids
lowerCAmelCase__ : str = use_labels
lowerCAmelCase__ : Dict = vocab_size
lowerCAmelCase__ : Union[str, Any] = hidden_size
lowerCAmelCase__ : Optional[int] = num_hidden_layers
lowerCAmelCase__ : List[Any] = num_attention_heads
lowerCAmelCase__ : int = intermediate_size
lowerCAmelCase__ : Union[str, Any] = hidden_act
lowerCAmelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCAmelCase__ : Any = attention_probs_dropout_prob
lowerCAmelCase__ : Dict = max_position_embeddings
lowerCAmelCase__ : int = type_vocab_size
lowerCAmelCase__ : int = type_sequence_label_size
lowerCAmelCase__ : Dict = initializer_range
lowerCAmelCase__ : List[str] = num_labels
lowerCAmelCase__ : Any = num_choices
lowerCAmelCase__ : List[Any] = scope
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : Tuple = None
if self.use_input_mask:
lowerCAmelCase__ : str = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : List[str] = None
if self.use_token_type_ids:
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ : Optional[Any] = None
lowerCAmelCase__ : Dict = None
lowerCAmelCase__ : str = None
if self.use_labels:
lowerCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self : Tuple , a : Dict , a : List[str] , a : str , a : Union[str, Any] , a : Optional[Any] , a : Dict , a : str ):
'''simple docstring'''
lowerCAmelCase__ : str = LlamaModel(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Dict = model(a , attention_mask=a )
lowerCAmelCase__ : Union[str, Any] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : int , a : Any , a : Union[str, Any] , a : Dict , a : Dict , a : List[Any] , a : Optional[Any] , a : int , a : Dict , a : Tuple , ):
'''simple docstring'''
lowerCAmelCase__ : int = True
lowerCAmelCase__ : Dict = LlamaModel(a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[Any] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , )
lowerCAmelCase__ : Optional[int] = model(
a , attention_mask=a , encoder_hidden_states=a , )
lowerCAmelCase__ : Union[str, Any] = model(a , attention_mask=a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : Union[str, Any] , a : int , a : List[Any] , a : int , a : Tuple , a : List[Any] , a : Union[str, Any] , a : Any , a : List[str] , a : List[str] , ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = LlamaForCausalLM(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Tuple = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : str , a : Any , a : Tuple , a : str , a : Union[str, Any] , a : Optional[Any] , a : List[Any] , a : Optional[Any] , a : Optional[Any] , a : List[str] , ):
'''simple docstring'''
lowerCAmelCase__ : str = True
lowerCAmelCase__ : Optional[int] = True
lowerCAmelCase__ : List[Any] = LlamaForCausalLM(config=a )
model.to(a )
model.eval()
# first forward pass
lowerCAmelCase__ : List[str] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , use_cache=a , )
lowerCAmelCase__ : List[Any] = outputs.past_key_values
# create hypothetical next tokens and extend them to next_input_ids
lowerCAmelCase__ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase__ : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append the new tokens to input_ids and the attention mask
lowerCAmelCase__ : int = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase__ : Any = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase__ : Union[str, Any] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , output_hidden_states=a , )['hidden_states'][0]
lowerCAmelCase__ : Union[str, Any] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , past_key_values=a , output_hidden_states=a , )['hidden_states'][0]
# select random slice
lowerCAmelCase__ : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase__ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase__ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1E-3 ) )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = config_and_inputs
lowerCAmelCase__ : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
lowercase = (LlamaForCausalLM,) if is_torch_available() else ()
lowercase = (
{
'feature-extraction': LlamaModel,
'text-classification': LlamaForSequenceClassification,
'text-generation': LlamaForCausalLM,
'zero-shot': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = LlamaModelTester(self )
lowerCAmelCase__ : str = ConfigTester(self , config_class=a , hidden_size=37 )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase__ : int = type
self.model_tester.create_and_check_model(*a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : int = 3
lowerCAmelCase__ : Dict = input_dict['input_ids']
lowerCAmelCase__ : Optional[Any] = input_ids.ne(1 ).to(a )
lowerCAmelCase__ : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase__ : Tuple = LlamaForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : str = model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : List[Any] = 3
lowerCAmelCase__ : List[str] = 'single_label_classification'
lowerCAmelCase__ : List[Any] = input_dict['input_ids']
lowerCAmelCase__ : List[Any] = input_ids.ne(1 ).to(a )
lowerCAmelCase__ : Optional[int] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase__ : int = LlamaForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Optional[int] = model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Optional[Any] = 3
lowerCAmelCase__ : Optional[Any] = 'multi_label_classification'
lowerCAmelCase__ : List[str] = input_dict['input_ids']
lowerCAmelCase__ : Tuple = input_ids.ne(1 ).to(a )
lowerCAmelCase__ : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase__ : Dict = LlamaForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Dict = model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def _lowerCamelCase ( self : Optional[int] , a : Dict ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Tuple = ids_tensor([1, 10] , config.vocab_size )
lowerCAmelCase__ : List[str] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase__ : List[Any] = LlamaModel(a )
original_model.to(a )
original_model.eval()
lowerCAmelCase__ : List[Any] = original_model(a ).last_hidden_state
lowerCAmelCase__ : str = original_model(a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase__ : Any = {'type': scaling_type, 'factor': 1_0.0}
lowerCAmelCase__ : Union[str, Any] = LlamaModel(a )
scaled_model.to(a )
scaled_model.eval()
lowerCAmelCase__ : Union[str, Any] = scaled_model(a ).last_hidden_state
lowerCAmelCase__ : Optional[int] = scaled_model(a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(a , a , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(a , a , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(a , a , atol=1E-5 ) )
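# A minimal sketch of the RoPE-scaling configuration exercised above (hedged:
# the 2.0 factor is illustrative; 'linear' and 'dynamic' are the two types
# the parameterized test covers):
#
#     from transformers import LlamaConfig, LlamaModel
#     config = LlamaConfig(rope_scaling={'type': 'dynamic', 'factor': 2.0})
#     model = LlamaModel(config)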
@require_torch
class A__ ( unittest.TestCase ):
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
lowerCAmelCase__ : Any = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
lowerCAmelCase__ : Dict = torch.tensor([[-6.6_5_5_0, -4.1_2_2_7, -4.9_8_5_9, -3.2_4_0_6, 0.8_2_6_2, -3.0_0_3_3, 1.2_9_6_4, -3.3_6_9_9]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : List[Any] = torch.tensor([-1_2.8_2_8_1, -7.4_4_5_3, -0.4_6_3_9, -8.0_6_2_5, -7.2_5_0_0, -8.0_0_0_0, -6.4_8_8_3, -7.7_6_9_5, -7.8_4_3_8, -7.0_3_1_2, -6.2_1_8_8, -7.1_3_2_8, -1.8_4_9_6, 1.9_9_6_1, -8.6_2_5_0, -6.7_2_2_7, -1_2.8_2_8_1, -6.9_4_9_2, -7.0_7_4_2, -7.7_8_5_2, -7.5_8_2_0, -7.9_0_6_2, -6.9_3_7_5, -7.9_8_0_5, -8.3_4_3_8, -8.1_5_6_2, -8.0_4_6_9, -7.6_2_5_0, -7.7_4_2_2, -7.3_3_9_8,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : Optional[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
lowerCAmelCase__ : List[str] = model(torch.tensor(a ) )
# Expected mean on dim = -1
lowerCAmelCase__ : Union[str, Any] = torch.tensor([[-2.0_6_2_2, -1.2_7_9_4, -1.1_6_3_8, -0.9_7_8_8, -1.4_6_0_3, -1.0_2_3_8, -1.7_8_9_3, -1.4_4_1_1]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : Any = torch.tensor([-8.1_4_0_6, -8.0_5_4_7, 2.7_4_6_1, -1.2_3_4_4, -0.1_4_4_8, -1.8_2_6_2, -1.0_0_2_0, -1.8_1_5_4, -1.6_8_9_5, -1.8_5_1_6, -2.3_5_7_4, -0.9_2_7_7, 3.7_5_9_8, 6.5_7_4_2, -1.2_9_9_8, -0.1_1_7_7, -8.1_4_0_6, -2.9_6_8_8, -2.9_1_9_9, -3.1_6_9_9, -3.5_2_5_4, -2.3_5_5_5, -2.7_9_8_8, -3.4_1_4_1, -2.8_2_6_2, -4.5_1_9_5, -3.3_3_7_9, -3.3_1_6_4, -2.7_8_3_2, -3.0_2_7_3] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
@unittest.skip('Logits are not exactly the same; once we fix the instabilities, we will update!' )
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : int = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : Optional[int] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
lowerCAmelCase__ : str = model(torch.tensor(a ) )
# Expected mean on dim = -1
lowerCAmelCase__ : str = torch.tensor([[-0.8_5_6_2, -1.8_5_2_0, -0.7_5_5_1, -0.4_1_6_2, -1.5_1_6_1, -1.2_0_3_8, -2.4_8_2_3, -2.3_2_5_4]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : List[str] = torch.tensor([-2.2_2_2_7, 4.8_8_2_8, 0.9_0_2_3, -0.4_5_7_8, -0.7_8_7_1, -0.1_0_3_3, -0.6_2_2_1, -0.5_7_8_6, -0.7_8_0_3, -1.0_6_7_4, -1.2_9_2_0, -0.1_5_7_0, 0.8_0_0_8, 2.0_7_2_3, -0.9_4_9_7, 0.2_7_7_1, -2.2_2_2_7, -0.7_6_1_2, -1.4_3_4_6, -1.2_0_6_1, -1.6_4_2_6, -0.3_0_0_0, -0.7_1_3_9, -1.1_9_3_4, -1.8_6_9_1, -1.6_9_7_3, -1.5_9_4_7, -1.2_7_0_5, -0.3_5_2_3, -0.5_5_1_3] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
'Logits are not exactly the same; once we fix the instabilities, we will update! This will also be a `too_slow` test.' )
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
lowerCAmelCase__ : List[str] = model(torch.tensor(a ) )
lowerCAmelCase__ : int = torch.tensor(
[[-4.2_3_2_7, -3.3_3_6_0, -4.6_6_6_5, -4.7_6_3_1, -1.8_1_8_0, -3.4_1_7_0, -1.4_2_1_1, -3.1_8_1_0]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# fmt: off
lowerCAmelCase__ : Optional[int] = torch.tensor([-9.4_9_2_2, -3.9_5_5_1, 1.7_9_9_8, -5.6_7_5_8, -5.1_0_5_5, -5.8_9_8_4, -4.8_3_2_0, -6.8_0_8_6, -6.5_3_9_1, -5.6_1_7_2, -5.5_8_2_0, -5.5_3_5_2, 1.7_8_8_1, 3.6_2_8_9, -6.5_1_1_7, -3.4_7_8_5, -9.5_0_0_0, -6.0_3_5_2, -6.8_1_2_5, -6.0_1_9_5, -6.6_8_3_6, -5.4_7_2_7, -6.2_8_1_2, -6.0_3_9_1, -7.3_3_9_8, -7.4_2_9_7, -7.4_8_4_4, -6.5_8_2_0, -5.8_7_8_9, -5.5_3_1_2] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
@unittest.skip('Model is currently gated' )
@slow
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
lowerCAmelCase__ : Tuple = 'Simply put, the theory of relativity states that '
lowerCAmelCase__ : Dict = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
lowerCAmelCase__ : Dict = tokenizer.encode(a , return_tensors='pt' )
lowerCAmelCase__ : str = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=a )
# greedy generation outputs
lowerCAmelCase__ : Optional[Any] = model.generate(a , max_new_tokens=64 , top_p=a , temperature=1 , do_sample=a )
lowerCAmelCase__ : Tuple = tokenizer.decode(generated_ids[0] , skip_special_tokens=a )
self.assertEqual(a , a )
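# The call above is labelled greedy generation: with sampling disabled,
# `top_p` and `temperature` do not affect which token is chosen. A minimal
# sketch of a greedy decoding loop; `next_token_logits` is a hypothetical
# callable returning a list of scores for the next position:
def _greedy_decode_sketch(next_token_logits, ids, max_new_tokens=64, eos_id=2):
    for _ in range(max_new_tokens):
        logits = next_token_logits(ids)
        next_id = max(range(len(logits)), key=logits.__getitem__)  # argmax
        ids = ids + [next_id]
        if next_id == eos_id:
            break
    return ids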
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = RoFormerTokenizer
lowercase = RoFormerTokenizerFast
lowercase = True
lowercase = True
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
super().setUp()
def _lowerCamelCase ( self : Dict , **a : int ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' , **a )
def _lowerCamelCase ( self : Dict , **a : Union[str, Any] ):
'''simple docstring'''
return self.rust_tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' , **a )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = '永和服装饰品有限公司,今天天气非常好'
lowerCAmelCase__ : Union[str, Any] = '永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'
return input_text, output_text
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.get_tokenizer()
lowerCAmelCase__ : Tuple = self.get_chinese_input_output_texts()
lowerCAmelCase__ : Union[str, Any] = tokenizer.tokenize(a )
self.assertListEqual(a , output_text.split() )
lowerCAmelCase__ : Union[str, Any] = tokens + [tokenizer.unk_token]
lowerCAmelCase__ : Optional[int] = [22_943, 21_332, 34_431, 45_904, 117, 306, 1_231, 1_231, 2_653, 33_994, 1_266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.get_rust_tokenizer()
lowerCAmelCase__ : List[str] = self.get_chinese_input_output_texts()
lowerCAmelCase__ : Tuple = tokenizer.tokenize(a )
self.assertListEqual(a , output_text.split() )
lowerCAmelCase__ : List[Any] = tokens + [tokenizer.unk_token]
lowerCAmelCase__ : Dict = [22_943, 21_332, 34_431, 45_904, 117, 306, 1_231, 1_231, 2_653, 33_994, 1_266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
pass
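# In the expected id lists above, the trailing 100 is the id assigned to the
# unknown token appended after tokenization. A minimal sketch of token-to-id
# conversion with an <unk> fallback (toy vocabulary, not the real RoFormer
# vocabulary):
def _tokens_to_ids_sketch(tokens, vocab, unk_id=100):
    return [vocab.get(token, unk_id) for token in tokens]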
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class A__ ( __magic_name__ ):
lowercase = 'unispeech'
def __init__( self : Any , a : List[Any]=32 , a : List[Any]=768 , a : Any=12 , a : List[str]=12 , a : List[Any]=3_072 , a : Any="gelu" , a : Dict=0.1 , a : List[str]=0.1 , a : List[str]=0.1 , a : Union[str, Any]=0.0 , a : str=0.0 , a : int=0.1 , a : List[str]=0.1 , a : List[Any]=0.0_2 , a : Optional[int]=1E-5 , a : Optional[int]="group" , a : Optional[Any]="gelu" , a : List[Any]=(512, 512, 512, 512, 512, 512, 512) , a : Optional[Any]=(5, 2, 2, 2, 2, 2, 2) , a : List[str]=(10, 3, 3, 3, 3, 2, 2) , a : Union[str, Any]=False , a : Union[str, Any]=128 , a : Tuple=16 , a : Dict=False , a : str=True , a : str=0.0_5 , a : Union[str, Any]=10 , a : Tuple=2 , a : int=0.0 , a : Optional[Any]=10 , a : List[str]=0 , a : str=320 , a : List[str]=2 , a : Optional[Any]=0.1 , a : Any=100 , a : Dict=256 , a : Any=256 , a : Dict=0.1 , a : List[Any]="mean" , a : Dict=False , a : str=False , a : Optional[int]=256 , a : Any=80 , a : List[Any]=0 , a : Optional[int]=1 , a : int=2 , a : List[Any]=0.5 , **a : int , ):
'''simple docstring'''
super().__init__(**a , pad_token_id=a , bos_token_id=a , eos_token_id=a )
lowerCAmelCase__ : List[str] = hidden_size
lowerCAmelCase__ : List[str] = feat_extract_norm
lowerCAmelCase__ : Optional[Any] = feat_extract_activation
lowerCAmelCase__ : str = list(a )
lowerCAmelCase__ : List[str] = list(a )
lowerCAmelCase__ : Tuple = list(a )
lowerCAmelCase__ : Dict = conv_bias
lowerCAmelCase__ : Optional[int] = num_conv_pos_embeddings
lowerCAmelCase__ : Any = num_conv_pos_embedding_groups
lowerCAmelCase__ : str = len(self.conv_dim )
lowerCAmelCase__ : Any = num_hidden_layers
lowerCAmelCase__ : Dict = intermediate_size
lowerCAmelCase__ : Dict = hidden_act
lowerCAmelCase__ : Union[str, Any] = num_attention_heads
lowerCAmelCase__ : Union[str, Any] = hidden_dropout
lowerCAmelCase__ : Tuple = attention_dropout
lowerCAmelCase__ : str = activation_dropout
lowerCAmelCase__ : Any = feat_proj_dropout
lowerCAmelCase__ : List[Any] = final_dropout
lowerCAmelCase__ : Tuple = layerdrop
lowerCAmelCase__ : Any = layer_norm_eps
lowerCAmelCase__ : Dict = initializer_range
lowerCAmelCase__ : Optional[Any] = num_ctc_classes
lowerCAmelCase__ : Tuple = vocab_size
lowerCAmelCase__ : Dict = do_stable_layer_norm
lowerCAmelCase__ : List[Any] = use_weighted_layer_sum
lowerCAmelCase__ : Any = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase__ : Union[str, Any] = apply_spec_augment
lowerCAmelCase__ : Any = mask_time_prob
lowerCAmelCase__ : Dict = mask_time_length
lowerCAmelCase__ : Tuple = mask_time_min_masks
lowerCAmelCase__ : Optional[int] = mask_feature_prob
lowerCAmelCase__ : Optional[Any] = mask_feature_length
lowerCAmelCase__ : int = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCAmelCase__ : int = num_codevectors_per_group
lowerCAmelCase__ : Any = num_codevector_groups
lowerCAmelCase__ : Any = contrastive_logits_temperature
lowerCAmelCase__ : int = feat_quantizer_dropout
lowerCAmelCase__ : List[Any] = num_negatives
lowerCAmelCase__ : List[str] = codevector_dim
lowerCAmelCase__ : Optional[int] = proj_codevector_dim
lowerCAmelCase__ : Dict = diversity_loss_weight
# ctc loss
lowerCAmelCase__ : Any = ctc_loss_reduction
lowerCAmelCase__ : Any = ctc_zero_infinity
# pretraining loss
lowerCAmelCase__ : Union[str, Any] = replace_prob
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
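# The property above multiplies the convolutional strides together, giving the
# overall downsampling factor of the feature extractor: with the default
# strides (5, 2, 2, 2, 2, 2, 2) the product is 320, i.e. one output frame per
# 320 input samples. A minimal standalone check using the modules already
# imported in this file:
def _total_conv_stride_sketch(conv_stride=(5, 2, 2, 2, 2, 2, 2)):
    return functools.reduce(operator.mul, conv_stride, 1)  # -> 320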
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = inspect.getfile(accelerate.test_utils )
lowerCAmelCase__ : Dict = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
lowerCAmelCase__ : str = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Dict = f'''
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
'''.split()
lowerCAmelCase__ : Tuple = [sys.executable] + distributed_args
execute_subprocess_async(a , env=os.environ.copy() )
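# The test above shells out to `xla_spawn.py`, which forks one process per TPU
# core; the argument list built from the f-string is equivalent to
# [sys.executable, f"{self.test_dir}/xla_spawn.py", "--num_cores", "8", self.test_file_path].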
import torch
from torch import nn
class A__ ( nn.Module ):
def __init__( self : Optional[int] , a : Union[str, Any] , a : str , a : str , a : List[Any] , a : List[Any]=1 , a : Tuple=False ):
'''simple docstring'''
super().__init__()
lowerCAmelCase__ : Dict = n_token
lowerCAmelCase__ : Any = d_embed
lowerCAmelCase__ : str = d_proj
lowerCAmelCase__ : int = cutoffs + [n_token]
lowerCAmelCase__ : Union[str, Any] = [0] + self.cutoffs
lowerCAmelCase__ : str = div_val
lowerCAmelCase__ : Tuple = self.cutoffs[0]
lowerCAmelCase__ : Dict = len(self.cutoffs ) - 1
lowerCAmelCase__ : Any = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
lowerCAmelCase__ : int = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
lowerCAmelCase__ : Optional[Any] = nn.Parameter(torch.zeros(self.n_clusters ) )
lowerCAmelCase__ : Optional[int] = nn.ModuleList()
lowerCAmelCase__ : Tuple = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(a , a ) ) )
else:
self.out_projs.append(a )
self.out_layers.append(nn.Linear(a , a ) )
else:
for i in range(len(self.cutoffs ) ):
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : Optional[Any] = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(a , a ) ) )
self.out_layers.append(nn.Linear(a , r_idx - l_idx ) )
lowerCAmelCase__ : Tuple = keep_order
def _lowerCamelCase ( self : Optional[int] , a : List[str] , a : int , a : List[str] , a : str ):
'''simple docstring'''
if proj is None:
lowerCAmelCase__ : Tuple = nn.functional.linear(a , a , bias=a )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
lowerCAmelCase__ : int = nn.functional.linear(a , proj.t().contiguous() )
lowerCAmelCase__ : Tuple = nn.functional.linear(a , a , bias=a )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def _lowerCamelCase ( self : List[str] , a : List[Any] , a : Optional[int]=None , a : Tuple=False ):
'''simple docstring'''
if labels is not None:
# Shift so that tokens < n predict n
lowerCAmelCase__ : str = hidden[..., :-1, :].contiguous()
lowerCAmelCase__ : Optional[Any] = labels[..., 1:].contiguous()
lowerCAmelCase__ : List[Any] = hidden.view(-1 , hidden.size(-1 ) )
lowerCAmelCase__ : Tuple = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
else:
lowerCAmelCase__ : Optional[Any] = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
lowerCAmelCase__ : Optional[Any] = self._compute_logit(a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
lowerCAmelCase__ : str = labels != -100
lowerCAmelCase__ : int = torch.zeros_like(a , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase__ : List[str] = (
-nn.functional.log_softmax(a , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
lowerCAmelCase__ : Optional[Any] = nn.functional.log_softmax(a , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase__ , lowerCAmelCase__ : int = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : Any = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase__ : Any = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase__ : Optional[Any] = self.out_layers[i].weight
lowerCAmelCase__ : Optional[int] = self.out_layers[i].bias
if i == 0:
lowerCAmelCase__ : Dict = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase__ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(a )
biases.append(a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase__ : List[Any] = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : Union[str, Any] = nn.functional.log_softmax(a , dim=1 )
if labels is None:
lowerCAmelCase__ : Tuple = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
lowerCAmelCase__ : Dict = torch.zeros_like(a , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase__ : Tuple = 0
lowerCAmelCase__ : Union[str, Any] = [0] + self.cutoffs
for i in range(len(a ) - 1 ):
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
lowerCAmelCase__ : Tuple = (labels >= l_idx) & (labels < r_idx)
lowerCAmelCase__ : int = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
lowerCAmelCase__ : Tuple = labels.index_select(0 , a ) - l_idx
lowerCAmelCase__ : Any = head_logprob.index_select(0 , a )
lowerCAmelCase__ : Optional[int] = hidden.index_select(0 , a )
else:
lowerCAmelCase__ : Any = hidden
if i == 0:
if labels is not None:
lowerCAmelCase__ : Union[str, Any] = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase__ : List[str] = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase__ : Union[str, Any] = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : Optional[int] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : List[Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
lowerCAmelCase__ : List[str] = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase__ : Tuple = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
lowerCAmelCase__ : Union[str, Any] = logprob_i
if labels is not None:
if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order:
out.index_copy_(0 , a , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def _lowerCamelCase ( self : List[Any] , a : Any ):
'''simple docstring'''
if self.n_clusters == 0:
lowerCAmelCase__ : Union[str, Any] = self._compute_logit(a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(a , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase__ , lowerCAmelCase__ : str = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : str = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase__ : Dict = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase__ : int = self.out_layers[i].weight
lowerCAmelCase__ : int = self.out_layers[i].bias
if i == 0:
lowerCAmelCase__ : Optional[int] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase__ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(a )
biases.append(a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase__ : Dict = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : List[Any] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
lowerCAmelCase__ : Optional[Any] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : List[Any] = [0] + self.cutoffs
for i in range(len(a ) - 1 ):
lowerCAmelCase__ , lowerCAmelCase__ : str = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
lowerCAmelCase__ : Union[str, Any] = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase__ : Dict = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : List[str] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : Dict = head_logprob[:, -i] + tail_logprob_i
lowerCAmelCase__ : List[str] = logprob_i
return out
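# Both code paths above implement the adaptive softmax factorisation used by
# Transformer-XL: the head softmax covers the shortlist plus one logit per
# tail cluster, and a word in cluster i is scored as
#     log p(word) = log p(cluster_i | head) + log p(word | cluster_i).
# A minimal numeric sketch of that composition (toy probabilities, not model
# outputs):
def _adaptive_softmax_sketch(head_probs, tail_probs_per_cluster, shortlist_size):
    import math  # local import keeps the sketch self-contained
    out = [math.log(p) for p in head_probs[:shortlist_size]]
    for i, tail_probs in enumerate(tail_probs_per_cluster):
        cluster_p = head_probs[shortlist_size + i]
        out.extend(math.log(cluster_p * p) for p in tail_probs)
    return out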
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__ ( __magic_name__ ):
lowercase = ['image_processor', 'tokenizer']
lowercase = 'LayoutLMv2ImageProcessor'
lowercase = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
def __init__( self : Dict , a : Optional[int]=None , a : List[str]=None , **a : Tuple ):
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , a , )
lowerCAmelCase__ : str = kwargs.pop('feature_extractor' )
lowerCAmelCase__ : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(a , a )
def __call__( self : str , a : Dict , a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , a : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , a : Union[List[List[int]], List[List[List[int]]]] = None , a : Optional[Union[List[int], List[List[int]]]] = None , a : bool = True , a : Union[bool, str, PaddingStrategy] = False , a : Union[bool, str, TruncationStrategy] = None , a : Optional[int] = None , a : int = 0 , a : Optional[int] = None , a : Optional[bool] = None , a : Optional[bool] = None , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = True , a : Optional[Union[str, TensorType]] = None , **a : List[str] , ):
'''simple docstring'''
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes '
'if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' )
# first, apply the image processor
lowerCAmelCase__ : Optional[Any] = self.image_processor(images=a , return_tensors=a )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(a , a ):
lowerCAmelCase__ : Optional[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
lowerCAmelCase__ : List[str] = features['words']
lowerCAmelCase__ : Any = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_token_type_ids=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_length=a , verbose=a , return_tensors=a , **a , )
# add pixel values
lowerCAmelCase__ : Optional[Any] = features.pop('pixel_values' )
if return_overflowing_tokens is True:
lowerCAmelCase__ : Union[str, Any] = self.get_overflowing_images(a , encoded_inputs['overflow_to_sample_mapping'] )
lowerCAmelCase__ : Union[str, Any] = images
return encoded_inputs
def _lowerCamelCase ( self : str , a : str , a : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(a ) != len(a ):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
f''' {len(a )} and {len(a )}''' )
return images_with_overflow
def _lowerCamelCase ( self : Union[str, Any] , *a : Tuple , **a : Optional[int] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*a , **a )
def _lowerCamelCase ( self : List[str] , *a : Optional[Any] , **a : Any ):
'''simple docstring'''
return self.tokenizer.decode(*a , **a )
@property
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , a , )
return self.image_processor_class
@property
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , a , )
return self.image_processor
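# When `return_overflowing_tokens=True`, one document image can yield several
# token windows; `get_overflowing_images` above repeats each image once per
# window using `overflow_to_sample_mapping`. A minimal sketch of that
# expansion:
def _expand_images_sketch(images, overflow_to_sample_mapping):
    return [images[sample_idx] for sample_idx in overflow_to_sample_mapping]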
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
lowerCamelCase__ = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("""""", """|""", """|"""),
datarow=DataRow("""""", """|""", """|"""),
padding=1,
with_header_hide=None,
)
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}}
lowerCamelCase__ = [
{
"""type""": """header""",
"""text""": {
"""type""": """plain_text""",
"""text""": F"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
"""emoji""": True,
},
}
]
lowerCamelCase__ = 0
for log in Path().glob("""*.log"""):
lowerCamelCase__ = 0
with open(log, """r""") as f:
for line in f:
lowerCamelCase__ = json.loads(line)
if line.get("""nodeid""", """""") != "":
lowerCamelCase__ = line["""nodeid"""]
if line.get("""duration""", None) is not None:
lowerCamelCase__ = F"""{line["duration"]:.4f}"""
if line.get("""outcome""", """""") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("""_""")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
lowerCamelCase__ = []
log.unlink()
lowerCamelCase__ = """"""
lowerCamelCase__ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
lowerCamelCase__ = []
lowerCamelCase__ = {}
for test in failed_tests:
lowerCamelCase__ = test[0].split("""::""")
lowerCamelCase__ = data[0].split("""/""")[-1]
if data[0] not in filesafailed:
lowerCamelCase__ = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
lowerCamelCase__ = [test[0] for test in failed_table]
lowerCamelCase__ = list(set(files))
# Count number of instances in failed_tests
lowerCamelCase__ = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
lowerCamelCase__ = tabulate(
table,
headers=["""Test Location""", """Num Failed"""],
tablefmt=hf_table_format,
stralign="""right""",
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
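# Slack caps a section block's text at 3000 characters, so the next branch
# truncates the report and points readers at the full Action results instead.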
if len(message) > 3000:
lowerCamelCase__ = """Too many failed tests, please see the full report in the Action results."""
lowerCamelCase__ = len(err) + 10
lowerCamelCase__ = message[: 3000 - offset] + F"""\n...\n```\n{err}"""
print(F"""### {message}""")
else:
lowerCamelCase__ = """No failed tests! 🤗"""
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get("""TEST_TYPE""", """""") != "":
from slack_sdk import WebClient
lowerCamelCase__ = WebClient(token=os.environ["""SLACK_API_TOKEN"""])
if message != "No failed tests! 🤗":
lowerCamelCase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": message,
},
}
payload.append(md_report)
lowerCamelCase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": """*For more details:*""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {
"""type""": """plain_text""",
"""text""": """Check Action results""",
"""emoji""": True,
},
"""url""": F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
lowerCamelCase__ = {
"""type""": """context""",
"""elements""": [
{
"""type""": """plain_text""",
"""text""": F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
lowerCamelCase__ = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload)
lowerCamelCase__ = response.data["""ts"""]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
lowerCamelCase__ = """"""
for i, row in enumerate(test_failures):
if row[0] != test_class:
lowerCamelCase__ = row[0]
else:
lowerCamelCase__ = """"""
lowerCamelCase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel="""#accelerate-ci-daily""",
thread_ts=ts,
blocks=[payload],
)
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = OpenAIGPTTokenizer
lowercase = OpenAIGPTTokenizerFast
lowercase = True
lowercase = False
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCAmelCase__ : Dict = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
lowerCAmelCase__ : List[Any] = dict(zip(a , range(len(a ) ) ) )
lowerCAmelCase__ : int = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', '']
lowerCAmelCase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' ) as fp:
fp.write(json.dumps(a ) )
with open(self.merges_file , 'w' ) as fp:
fp.write('\n'.join(a ) )
def _lowerCamelCase ( self : Optional[Any] , a : Union[str, Any] ):
'''simple docstring'''
return "lower newer", "lower newer"
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
lowerCAmelCase__ : Optional[int] = 'lower'
lowerCAmelCase__ : Optional[Any] = ['low', 'er</w>']
lowerCAmelCase__ : Optional[Any] = tokenizer.tokenize(a )
self.assertListEqual(a , a )
lowerCAmelCase__ : Optional[int] = tokens + ['<unk>']
lowerCAmelCase__ : List[str] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a )
def _lowerCamelCase ( self : Dict , a : List[Any]=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase__ : List[str] = self.rust_tokenizer_class.from_pretrained(a , **a )
# Simple input
lowerCAmelCase__ : Optional[int] = 'This is a simple input'
lowerCAmelCase__ : int = ['This is a simple input 1', 'This is a simple input 2']
lowerCAmelCase__ : int = ('This is a simple input', 'This is a pair')
lowerCAmelCase__ : List[str] = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(a , tokenizer_r.encode , a , max_length=a , padding='max_length' )
# Simple input
self.assertRaises(a , tokenizer_r.encode_plus , a , max_length=a , padding='max_length' )
# Simple input
self.assertRaises(
a , tokenizer_r.batch_encode_plus , a , max_length=a , padding='max_length' , )
# Pair input
self.assertRaises(a , tokenizer_r.encode , a , max_length=a , padding='max_length' )
# Pair input
self.assertRaises(a , tokenizer_r.encode_plus , a , max_length=a , padding='max_length' )
# Pair input
self.assertRaises(
a , tokenizer_r.batch_encode_plus , a , max_length=a , padding='max_length' , )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
pass
@require_ftfy
@require_spacy
@require_tokenizers
class A__ ( __magic_name__ ):
pass
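# The toy vocabulary above encodes "lower" as ['low', 'er</w>'] by applying
# the merges 'l o' -> 'lo', 'lo w' -> 'low' and 'e r</w>' -> 'er</w>' in
# order. A minimal sketch of one BPE merge pass over a symbol sequence (an
# illustration of the idea, not the tokenizer's implementation):
def _bpe_merge_sketch(symbols, merge):
    first, second = merge.split()
    out, i = [], 0
    while i < len(symbols):
        if i < len(symbols) - 1 and symbols[i] == first and symbols[i + 1] == second:
            out.append(first + second)  # apply the merge
            i += 2
        else:
            out.append(symbols[i])
            i += 1
    return out
# e.g. _bpe_merge_sketch(['l', 'o', 'w'], 'l o') -> ['lo', 'w']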
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowerCamelCase__ = imread(r"""digital_image_processing/image_data/lena_small.jpg""")
lowerCamelCase__ = cvtColor(img, COLOR_BGR2GRAY)
def lowerCAmelCase__ ( ) -> Dict:
lowerCAmelCase__ : List[Any] = cn.convert_to_negative(SCREAMING_SNAKE_CASE_ )
# assert negative_img array for at least one True
assert negative_img.any()
def lowerCAmelCase__ ( ) -> Optional[Any]:
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around: assert on the repr of the returned PIL image
assert str(cc.change_contrast(SCREAMING_SNAKE_CASE_ , 110 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def lowerCAmelCase__ ( ) -> Tuple:
lowerCAmelCase__ : str = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def lowerCAmelCase__ ( ) -> Tuple:
lowerCAmelCase__ : Tuple = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
lowerCAmelCase__ : Optional[Any] = canny.canny(SCREAMING_SNAKE_CASE_ )
# assert canny array for at least one True
assert canny_array.any()
def lowerCAmelCase__ ( ) -> Optional[int]:
assert gg.gaussian_filter(SCREAMING_SNAKE_CASE_ , 5 , sigma=0.9 ).all()
def lowerCAmelCase__ ( ) -> Dict:
# laplace diagonals
lowerCAmelCase__ : Union[str, Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
lowerCAmelCase__ : int = conv.img_convolve(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).astype(SCREAMING_SNAKE_CASE_ )
assert res.any()
def lowerCAmelCase__ ( ) -> List[str]:
assert med.median_filter(SCREAMING_SNAKE_CASE_ , 3 ).any()
def lowerCAmelCase__ ( ) -> Any:
lowerCAmelCase__ , lowerCAmelCase__ : str = sob.sobel_filter(SCREAMING_SNAKE_CASE_ )
assert grad.any() and theta.any()
def lowerCAmelCase__ ( ) -> Any:
lowerCAmelCase__ : int = sp.make_sepia(SCREAMING_SNAKE_CASE_ , 20 )
assert sepia.all()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = "digital_image_processing/image_data/lena_small.jpg" ) -> Optional[Any]:
lowerCAmelCase__ : List[Any] = bs.Burkes(imread(SCREAMING_SNAKE_CASE_ , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = "digital_image_processing/image_data/lena_small.jpg" , ) -> Any:
lowerCAmelCase__ : Dict = rs.NearestNeighbour(imread(SCREAMING_SNAKE_CASE_ , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def lowerCAmelCase__ ( ) -> int:
lowerCAmelCase__ : int = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
lowerCAmelCase__ : List[str] = imread(SCREAMING_SNAKE_CASE_ , 0 )
# Test for get_neighbors_pixel function() return not None
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : List[str] = image[x_coordinate][y_coordinate]
lowerCAmelCase__ : Dict = lbp.get_neighbors_pixel(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
lowerCAmelCase__ : List[str] = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
lowerCAmelCase__ : Dict = lbp.local_binary_value(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert lbp_image.any()
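# The local binary pattern value computed above compares each of a pixel's
# eight neighbours against the centre: neighbours >= centre contribute a 1
# bit. A minimal sketch for one pixel; note that neighbour ordering and the
# >= vs > convention vary between implementations, so this mirrors the idea
# rather than this repo's exact bit order:
def _lbp_value_sketch(center, neighbors):
    return sum((1 << k) for k, n in enumerate(neighbors) if n >= center)
# e.g. _lbp_value_sketch(5, [6, 4, 5, 1, 9, 0, 5, 3]) -> 85 (0b1010101)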
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = KandinskyVaaPipeline
lowercase = [
'image_embeds',
'negative_image_embeds',
]
lowercase = ['image_embeds', 'negative_image_embeds']
lowercase = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowercase = False
@property
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return self.time_input_dim
@property
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return 100
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = {
'in_channels': 4,
# Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCAmelCase__ : Tuple = UNetaDConditionModel(**a )
return model
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : List[str] = VQModel(**self.dummy_movq_kwargs )
return model
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.dummy_unet
lowerCAmelCase__ : Optional[Any] = self.dummy_movq
lowerCAmelCase__ : Tuple = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule='linear' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=a , set_alpha_to_one=a , steps_offset=1 , prediction_type='epsilon' , thresholding=a , )
lowerCAmelCase__ : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def _lowerCamelCase ( self : List[Any] , a : List[Any] , a : List[Any]=0 ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(a ) ).to(a )
lowerCAmelCase__ : Any = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
a )
if str(a ).startswith('mps' ):
lowerCAmelCase__ : Union[str, Any] = torch.manual_seed(a )
else:
lowerCAmelCase__ : List[str] = torch.Generator(device=a ).manual_seed(a )
lowerCAmelCase__ : Dict = {
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = 'cpu'
lowerCAmelCase__ : Union[str, Any] = self.get_dummy_components()
lowerCAmelCase__ : List[str] = self.pipeline_class(**a )
lowerCAmelCase__ : str = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : List[Any] = pipe(**self.get_dummy_inputs(a ) )
lowerCAmelCase__ : List[str] = output.images
lowerCAmelCase__ : Optional[int] = pipe(
**self.get_dummy_inputs(a ) , return_dict=a , )[0]
lowerCAmelCase__ : Dict = image[0, -3:, -3:, -1]
lowerCAmelCase__ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : Any = np.array(
[0.6_2_3_7_9_7_6, 1.0, 0.3_6_4_4_1_3_3_2, 1.0, 0.7_0_6_3_9_6_3_4, 0.2_9_8_7_7_1_8_6, 0.8_5_6_5_2_1_2_5, 0.5_2_1_6_8_4_3, 0.5_4_4_5_4_0_4_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy' )
lowerCAmelCase__ : Optional[int] = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(a )
lowerCAmelCase__ : str = KandinskyVaaPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder' , torch_dtype=torch.floataa )
lowerCAmelCase__ : int = pipeline.to(a )
pipeline.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Any = 'red cat, 4k photo'
lowerCAmelCase__ : Tuple = torch.Generator(device='cuda' ).manual_seed(0 )
lowerCAmelCase__ : Dict = pipe_prior(
a , generator=a , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
lowerCAmelCase__ : List[Any] = torch.Generator(device='cuda' ).manual_seed(0 )
lowerCAmelCase__ : List[Any] = pipeline(
image_embeds=a , negative_image_embeds=a , generator=a , num_inference_steps=100 , output_type='np' , )
lowerCAmelCase__ : List[str] = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(a , a )
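# The slow test above runs Kandinsky's two-stage setup: the prior pipeline
# maps the text prompt to CLIP-style image embeddings, and the decoder
# pipeline turns those embeddings (plus negative embeddings for
# classifier-free guidance) into the final 512x512 image.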
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
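# At runtime (when not type checking), `_LazyModule` replaces this module with
# a proxy that resolves names from `_import_structure` on first attribute
# access, so the heavy torch/TF imports are deferred. A minimal sketch of the
# idea, not the transformers implementation:
class _LazyAttrSketch:
    def __init__(self, loaders):
        self._loaders = loaders  # name -> zero-arg callable doing the import

    def __getattr__(self, name):
        value = self._loaders[name]()  # import only when first accessed
        setattr(self, name, value)  # cache for subsequent lookups
        return value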
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
lowerCAmelCase__ : Optional[int] = []
for part_id in partition_order:
lowerCAmelCase__ : Tuple = df.where(F'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(SCREAMING_SNAKE_CASE_ ):
expected_row_ids_and_row_dicts.append((F'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def lowerCAmelCase__ ( ) -> Any:
lowerCAmelCase__ : Optional[Any] = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
lowerCAmelCase__ : Optional[int] = spark.range(100 ).repartition(1 )
lowerCAmelCase__ : Optional[Any] = Spark(SCREAMING_SNAKE_CASE_ )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def lowerCAmelCase__ ( ) -> Tuple:
lowerCAmelCase__ : Optional[Any] = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
lowerCAmelCase__ : Tuple = spark.range(10 ).repartition(2 )
lowerCAmelCase__ : Optional[Any] = [1, 0]
lowerCAmelCase__ : Optional[int] = _generate_iterable_examples(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Reverse the partitions.
lowerCAmelCase__ : int = _get_expected_row_ids_and_row_dicts_for_partition_order(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
lowerCAmelCase__ : Tuple = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCAmelCase__ ( ) -> List[Any]:
lowerCAmelCase__ : List[str] = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
lowerCAmelCase__ : List[Any] = spark.range(10 ).repartition(1 )
lowerCAmelCase__ : List[Any] = SparkExamplesIterable(SCREAMING_SNAKE_CASE_ )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(SCREAMING_SNAKE_CASE_ ):
assert row_id == F'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def lowerCAmelCase__ ( ) -> List[Any]:
lowerCAmelCase__ : Optional[Any] = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
lowerCAmelCase__ : List[Any] = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('numpy.random.Generator' ) as generator_mock:
lowerCAmelCase__ : str = lambda SCREAMING_SNAKE_CASE_ : x.reverse()
lowerCAmelCase__ : Tuple = _get_expected_row_ids_and_row_dicts_for_partition_order(SCREAMING_SNAKE_CASE_ , [2, 1, 0] )
lowerCAmelCase__ : Tuple = SparkExamplesIterable(SCREAMING_SNAKE_CASE_ ).shuffle_data_sources(SCREAMING_SNAKE_CASE_ )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : List[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCAmelCase__ ( ) -> List[Any]:
lowerCAmelCase__ : str = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
lowerCAmelCase__ : Dict = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
lowerCAmelCase__ : Any = SparkExamplesIterable(SCREAMING_SNAKE_CASE_ ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
lowerCAmelCase__ : List[str] = _get_expected_row_ids_and_row_dicts_for_partition_order(SCREAMING_SNAKE_CASE_ , [0, 2] )
for i, (row_id, row_dict) in enumerate(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : Optional[Any] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
lowerCAmelCase__ : Any = SparkExamplesIterable(SCREAMING_SNAKE_CASE_ ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
lowerCAmelCase__ : int = _get_expected_row_ids_and_row_dicts_for_partition_order(SCREAMING_SNAKE_CASE_ , [1, 3] )
for i, (row_id, row_dict) in enumerate(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : int = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCAmelCase__ ( ) -> str:
lowerCAmelCase__ : List[Any] = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
lowerCAmelCase__ : Any = spark.range(100 ).repartition(1 )
lowerCAmelCase__ : Optional[Any] = Spark(SCREAMING_SNAKE_CASE_ )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
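# `shard_data_sources(worker_id, num_workers)` exercised above assigns
# partitions round robin, which is why worker 0 sees partitions [0, 2] and
# worker 1 sees [1, 3] out of 4. A minimal sketch of that assignment:
def _shard_partitions_sketch(num_partitions, worker_id, num_workers):
    return list(range(num_partitions))[worker_id::num_workers]
# _shard_partitions_sketch(4, 0, 2) -> [0, 2]; _shard_partitions_sketch(4, 1, 2) -> [1, 3]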
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0.999 , SCREAMING_SNAKE_CASE_="cosine" , ) -> Union[str, Any]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(t ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(t ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
lowerCAmelCase__ : Tuple = []
for i in range(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : List[Any] = i / num_diffusion_timesteps
lowerCAmelCase__ : str = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(SCREAMING_SNAKE_CASE_ ) / alpha_bar_fn(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) )
return torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )
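# `betas_for_alpha_bar` discretises a continuous alpha-bar curve: each beta is
# chosen so that alpha_bar_fn(t_{i+1}) / alpha_bar_fn(t_i) = 1 - beta_i,
# capped at the max beta (0.999 by default). For the cosine schedule this is
#     beta_i = min(1 - alpha_bar((i + 1) / N) / alpha_bar(i / N), 0.999).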
class A__ ( __magic_name__ , __magic_name__ ):
lowercase = [e.name for e in KarrasDiffusionSchedulers]
lowercase = 2
@register_to_config
def __init__( self : Union[str, Any] , a : int = 1_000 , a : float = 0.0_0_0_8_5 , a : float = 0.0_1_2 , a : str = "linear" , a : Optional[Union[np.ndarray, List[float]]] = None , a : str = "epsilon" , a : Optional[bool] = False , a : Optional[bool] = False , a : float = 1.0 , a : str = "linspace" , a : int = 0 , ):
'''simple docstring'''
if trained_betas is not None:
lowerCAmelCase__ : List[str] = torch.tensor(a , dtype=torch.floataa )
elif beta_schedule == "linear":
lowerCAmelCase__ : List[str] = torch.linspace(a , a , a , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowerCAmelCase__ : Union[str, Any] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , a , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowerCAmelCase__ : int = betas_for_alpha_bar(a , alpha_transform_type='cosine' )
elif beta_schedule == "exp":
lowerCAmelCase__ : List[str] = betas_for_alpha_bar(a , alpha_transform_type='exp' )
else:
raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
lowerCAmelCase__ : int = 1.0 - self.betas
lowerCAmelCase__ : Tuple = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(a , a , a )
lowerCAmelCase__ : Optional[Any] = use_karras_sigmas
def _lowerCamelCase ( self : str , a : List[Any] , a : str=None ):
'''simple docstring'''
if schedule_timesteps is None:
lowerCAmelCase__ : List[str] = self.timesteps
lowerCAmelCase__ : int = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
lowerCAmelCase__ : List[str] = 1 if len(a ) > 1 else 0
else:
lowerCAmelCase__ : Tuple = timestep.cpu().item() if torch.is_tensor(a ) else timestep
lowerCAmelCase__ : Tuple = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _lowerCamelCase ( self : Tuple , a : torch.FloatTensor , a : Union[float, torch.FloatTensor] , ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.index_for_timestep(a )
lowerCAmelCase__ : Any = self.sigmas[step_index]
lowerCAmelCase__ : Optional[Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _lowerCamelCase ( self : List[str] , a : int , a : Union[str, torch.device] = None , a : Optional[int] = None , ):
'''simple docstring'''
lowerCAmelCase__ : Any = num_inference_steps
lowerCAmelCase__ : Union[str, Any] = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
lowerCAmelCase__ : Union[str, Any] = np.linspace(0 , num_train_timesteps - 1 , a , dtype=a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
lowerCAmelCase__ : List[Any] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowerCAmelCase__ : Dict = (np.arange(0 , a ) * step_ratio).round()[::-1].copy().astype(a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
lowerCAmelCase__ : Tuple = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowerCAmelCase__ : int = (np.arange(a , 0 , -step_ratio )).round().copy().astype(a )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
lowerCAmelCase__ : str = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
lowerCAmelCase__ : List[Any] = np.log(a )
lowerCAmelCase__ : Optional[int] = np.interp(a , np.arange(0 , len(a ) ) , a )
if self.config.use_karras_sigmas:
lowerCAmelCase__ : str = self._convert_to_karras(in_sigmas=a , num_inference_steps=self.num_inference_steps )
lowerCAmelCase__ : Union[str, Any] = np.array([self._sigma_to_t(a , a ) for sigma in sigmas] )
lowerCAmelCase__ : Tuple = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
lowerCAmelCase__ : Dict = torch.from_numpy(a ).to(device=a )
lowerCAmelCase__ : Union[str, Any] = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
lowerCAmelCase__ : Tuple = torch.from_numpy(a )
lowerCAmelCase__ : List[str] = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(a ).startswith('mps' ):
# mps does not support float64
lowerCAmelCase__ : Optional[Any] = timesteps.to(a , dtype=torch.floataa )
else:
lowerCAmelCase__ : Any = timesteps.to(device=a )
# empty dt and derivative
lowerCAmelCase__ : str = None
lowerCAmelCase__ : Optional[int] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
lowerCAmelCase__ : Optional[Any] = defaultdict(a )
def _lowerCamelCase ( self : Any , a : Dict , a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = np.log(a )
# get distribution
lowerCAmelCase__ : Tuple = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
lowerCAmelCase__ : Optional[int] = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
lowerCAmelCase__ : List[str] = low_idx + 1
lowerCAmelCase__ : List[str] = log_sigmas[low_idx]
lowerCAmelCase__ : Any = log_sigmas[high_idx]
# interpolate sigmas
lowerCAmelCase__ : Union[str, Any] = (low - log_sigma) / (low - high)
lowerCAmelCase__ : List[Any] = np.clip(a , 0 , 1 )
# transform interpolation to time range
lowerCAmelCase__ : List[Any] = (1 - w) * low_idx + w * high_idx
lowerCAmelCase__ : Any = t.reshape(sigma.shape )
return t
def _lowerCamelCase ( self : Tuple , a : torch.FloatTensor , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : float = in_sigmas[-1].item()
lowerCAmelCase__ : float = in_sigmas[0].item()
lowerCAmelCase__ : Tuple = 7.0 # 7.0 is the value used in the paper
lowerCAmelCase__ : Tuple = np.linspace(0 , 1 , a )
lowerCAmelCase__ : Any = sigma_min ** (1 / rho)
lowerCAmelCase__ : Optional[Any] = sigma_max ** (1 / rho)
lowerCAmelCase__ : Tuple = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return self.dt is None
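    # (Added note) `dt` is set by the first (Euler) half of a Heun step and cleared after
    # the second, so `self.dt is None` means the next `step` call takes the first-order
    # predictor rather than the second-order corrector.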
def _lowerCamelCase ( self : List[str] , a : Union[torch.FloatTensor, np.ndarray] , a : Union[float, torch.FloatTensor] , a : Union[torch.FloatTensor, np.ndarray] , a : bool = True , ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.index_for_timestep(a )
# advance index counter by 1
lowerCAmelCase__ : Tuple = timestep.cpu().item() if torch.is_tensor(a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
lowerCAmelCase__ : Union[str, Any] = self.sigmas[step_index]
lowerCAmelCase__ : Union[str, Any] = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
lowerCAmelCase__ : int = self.sigmas[step_index - 1]
lowerCAmelCase__ : Any = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyway.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
lowerCAmelCase__ : Optional[int] = 0
lowerCAmelCase__ : Union[str, Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
lowerCAmelCase__ : int = sigma_hat if self.state_in_first_order else sigma_next
lowerCAmelCase__ : Any = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
lowerCAmelCase__ : Dict = sigma_hat if self.state_in_first_order else sigma_next
lowerCAmelCase__ : List[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
lowerCAmelCase__ : int = model_output
        else:
            raise ValueError(
                f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`''' )
if self.config.clip_sample:
lowerCAmelCase__ : str = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
lowerCAmelCase__ : Dict = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
lowerCAmelCase__ : Optional[int] = sigma_next - sigma_hat
# store for 2nd order step
lowerCAmelCase__ : List[Any] = derivative
lowerCAmelCase__ : str = dt
lowerCAmelCase__ : Dict = sample
else:
# 2. 2nd order / Heun's method
lowerCAmelCase__ : Union[str, Any] = (sample - pred_original_sample) / sigma_next
lowerCAmelCase__ : Union[str, Any] = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
lowerCAmelCase__ : Dict = self.dt
lowerCAmelCase__ : Optional[int] = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
lowerCAmelCase__ : List[str] = None
lowerCAmelCase__ : Tuple = None
lowerCAmelCase__ : str = None
lowerCAmelCase__ : Tuple = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=a )
def _lowerCamelCase ( self : int , a : torch.FloatTensor , a : torch.FloatTensor , a : torch.FloatTensor , ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(a ):
# mps does not support float64
lowerCAmelCase__ : Optional[int] = self.timesteps.to(original_samples.device , dtype=torch.floataa )
lowerCAmelCase__ : int = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
lowerCAmelCase__ : Union[str, Any] = self.timesteps.to(original_samples.device )
lowerCAmelCase__ : Optional[Any] = timesteps.to(original_samples.device )
lowerCAmelCase__ : List[Any] = [self.index_for_timestep(a , a ) for t in timesteps]
lowerCAmelCase__ : List[str] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
lowerCAmelCase__ : Any = sigma.unsqueeze(-1 )
lowerCAmelCase__ : List[str] = original_samples + noise * sigma
return noisy_samples
def __len__( self : int ):
'''simple docstring'''
return self.config.num_train_timesteps
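# Illustrative sketch (an addition, not part of the scheduler above): the Karras et al.
# (2022) sigma schedule that `_convert_to_karras` implements, as a standalone function.
# The bounds in the usage note are hypothetical; rho = 7.0 is the paper's value.
import numpy as np

def karras_sigmas_sketch(sigma_min: float, sigma_max: float, num_steps: int, rho: float = 7.0) -> np.ndarray:
    # Interpolate linearly in sigma**(1/rho) space, then map back by raising to rho;
    # this concentrates steps at low noise levels, where they matter most.
    ramp = np.linspace(0, 1, num_steps)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho

# Usage: karras_sigmas_sketch(0.1, 10.0, 5) decays from 10.0 down to 0.1, keeping the
# early steps coarse and the late, low-noise steps fine.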
| 307
| 0
|
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
_enforce_args(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if n == 0:
return 0
lowerCAmelCase__ : Tuple = float('-inf' )
for i in range(1 , n + 1 ):
lowerCAmelCase__ : int = max(
SCREAMING_SNAKE_CASE_ , prices[i - 1] + naive_cut_rod_recursive(n - i , SCREAMING_SNAKE_CASE_ ) )
return max_revue
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
_enforce_args(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Dict = [float('-inf' ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
lowerCAmelCase__ : Optional[int] = float('-inf' )
for i in range(1 , n + 1 ):
lowerCAmelCase__ : Optional[int] = max(
SCREAMING_SNAKE_CASE_ , prices[i - 1] + _top_down_cut_rod_recursive(n - i , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , )
lowerCAmelCase__ : List[str] = max_revenue
return max_rev[n]
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
_enforce_args(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    # length(max_rev) = n + 1, to accommodate the revenue obtainable from a rod of
    # length 0.
lowerCAmelCase__ : int = [float('-inf' ) for _ in range(n + 1 )]
lowerCAmelCase__ : List[Any] = 0
for i in range(1 , n + 1 ):
lowerCAmelCase__ : List[str] = max_rev[i]
for j in range(1 , i + 1 ):
lowerCAmelCase__ : Tuple = max(SCREAMING_SNAKE_CASE_ , prices[j - 1] + max_rev[i - j] )
lowerCAmelCase__ : Union[str, Any] = max_revenue_i
return max_rev[n]
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
if n < 0:
lowerCAmelCase__ : Optional[int] = F'''n must be greater than or equal to 0. Got n = {n}'''
raise ValueError(SCREAMING_SNAKE_CASE_ )
if n > len(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : int = (
'Each integral piece of rod must have a corresponding price. '
F'''Got n = {n} but length of prices = {len(SCREAMING_SNAKE_CASE_ )}'''
)
raise ValueError(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( ) -> List[Any]:
lowerCAmelCase__ : Any = [6, 10, 12, 15, 20, 23]
lowerCAmelCase__ : Union[str, Any] = len(SCREAMING_SNAKE_CASE_ )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
lowerCAmelCase__ : List[Any] = 36
lowerCAmelCase__ : Dict = top_down_cut_rod(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : List[Any] = bottom_up_cut_rod(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = naive_cut_rod_recursive(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
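# Illustrative sketch (an addition, not from the original module): the bottom-up table
# extended to record the first cut chosen at each length, so one optimal cut sequence
# can be reconstructed alongside the maximum revenue. The price list below is a
# hypothetical example.
def bottom_up_cut_rod_with_cuts_sketch(n: int, prices: list) -> tuple:
    max_rev = [0] * (n + 1)
    first_cut = [0] * (n + 1)  # first_cut[i] = length of the first piece in an optimal cut of i
    for i in range(1, n + 1):
        best = float("-inf")
        for j in range(1, i + 1):
            if prices[j - 1] + max_rev[i - j] > best:
                best = prices[j - 1] + max_rev[i - j]
                first_cut[i] = j
        max_rev[i] = best
    cuts = []
    while n > 0:
        cuts.append(first_cut[n])
        n -= first_cut[n]
    return max_rev[-1], cuts

# Usage: bottom_up_cut_rod_with_cuts_sketch(4, [1, 5, 8, 9]) returns (10, [2, 2]):
# cutting a rod of length 4 into two pieces of length 2 earns 5 + 5 = 10.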
| 355
|
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
if isinstance(SCREAMING_SNAKE_CASE_ , collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class A__ :
def _lowerCamelCase ( self : List[Any] , a : List[str] , a : Optional[Any] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Dict , a : int , a : str , a : List[Any] , a : Dict , a : List[str]=None , **a : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(a , a )
lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel(a )
lowerCAmelCase__ : Tuple = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) )
def _lowerCamelCase ( self : Union[str, Any] , a : Dict , a : Tuple , a : Dict , a : Union[str, Any] , a : List[Any]=None , **a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.get_vision_text_model(a , a )
lowerCAmelCase__ : List[Any] = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : Optional[int] = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _lowerCamelCase ( self : List[str] , a : Optional[int] , a : Optional[int] , a : Union[str, Any] , a : List[Any] , a : Any=None , **a : Dict ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Optional[Any] = {'vision_model': vision_model, 'text_model': text_model}
lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**a )
lowerCAmelCase__ : Union[str, Any] = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _lowerCamelCase ( self : Any , a : Optional[int] , a : Optional[int] , a : Dict , a : Optional[int] , a : Optional[int]=None , **a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : int = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Dict = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : List[str] = model(input_ids=a , pixel_values=a , attention_mask=a )
lowerCAmelCase__ : Union[str, Any] = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a )
lowerCAmelCase__ : Any = TFVisionTextDualEncoderModel.from_pretrained(a )
lowerCAmelCase__ : int = model(input_ids=a , pixel_values=a , attention_mask=a )
lowerCAmelCase__ : Union[str, Any] = after_output[0].numpy()
lowerCAmelCase__ : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(a , 1E-5 )
def _lowerCamelCase ( self : List[str] , a : Dict , a : Optional[int] , a : List[Any] , a : str , a : int=None , **a : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Any = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : str = model(
input_ids=a , pixel_values=a , attention_mask=a , output_attentions=a )
lowerCAmelCase__ : Union[str, Any] = output.vision_model_output.attentions
self.assertEqual(len(a ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase__ : Optional[int] = to_atuple(vision_model.config.image_size )
lowerCAmelCase__ : Optional[Any] = to_atuple(vision_model.config.patch_size )
lowerCAmelCase__ : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowerCAmelCase__ : int = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowerCAmelCase__ : str = output.text_model_output.attentions
self.assertEqual(len(a ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _lowerCamelCase ( self : List[Any] , a : np.ndarray , a : np.ndarray , a : float ):
'''simple docstring'''
lowerCAmelCase__ : int = np.abs((a - b) ).max()
        self.assertLessEqual(a , a , f'''Difference between model outputs is {diff} (>= {tol}).''' )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : str = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
self.check_save_load(**a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**a )
@slow
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.get_pretrained_model_and_inputs()
lowerCAmelCase__ : List[Any] = model_a(**a )
lowerCAmelCase__ : Optional[int] = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(a )
lowerCAmelCase__ : str = TFVisionTextDualEncoderModel.from_pretrained(a )
lowerCAmelCase__ : List[str] = model_a(**a )
lowerCAmelCase__ : int = after_outputs[0].numpy()
lowerCAmelCase__ : List[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(a , 1E-5 )
@require_tf
class A__ ( __magic_name__ , unittest.TestCase ):
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-random-bert' )
lowerCAmelCase__ : int = 13
lowerCAmelCase__ : List[Any] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowerCAmelCase__ : int = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
lowerCAmelCase__ : Optional[Any] = random_attention_mask([batch_size, 4] )
lowerCAmelCase__ : List[Any] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _lowerCamelCase ( self : List[Any] , a : Dict , a : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = TFViTModel(a , name='vision_model' )
lowerCAmelCase__ : str = TFBertModel(a , name='text_model' )
return vision_model, text_model
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = TFViTModelTester(self )
lowerCAmelCase__ : Tuple = TFBertModelTester(self )
lowerCAmelCase__ : Optional[int] = vit_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : Union[str, Any] = bert_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : int = vision_config_and_inputs
        (
            lowerCAmelCase__ ,
            lowerCAmelCase__ ,
            lowerCAmelCase__ ,
            lowerCAmelCase__ ,
            lowerCAmelCase__ ,
            lowerCAmelCase__ ,
            lowerCAmelCase__ ,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class A__ ( __magic_name__ , unittest.TestCase ):
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'Rocketknight1/tiny-random-deit-tf' , 'hf-internal-testing/tiny-random-roberta' )
lowerCAmelCase__ : Tuple = 13
lowerCAmelCase__ : Any = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowerCAmelCase__ : Dict = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
lowerCAmelCase__ : Any = random_attention_mask([batch_size, 4] )
lowerCAmelCase__ : Tuple = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _lowerCamelCase ( self : str , a : Optional[Any] , a : Dict , a : Dict , a : Any , a : Any=None , **a : int ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Optional[int] = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : Any = model(
input_ids=a , pixel_values=a , attention_mask=a , output_attentions=a )
lowerCAmelCase__ : Union[str, Any] = output.vision_model_output.attentions
self.assertEqual(len(a ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowerCAmelCase__ : str = to_atuple(vision_model.config.image_size )
lowerCAmelCase__ : Union[str, Any] = to_atuple(vision_model.config.patch_size )
lowerCAmelCase__ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowerCAmelCase__ : int = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowerCAmelCase__ : List[str] = output.text_model_output.attentions
self.assertEqual(len(a ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _lowerCamelCase ( self : int , a : Optional[int] , a : int ):
'''simple docstring'''
lowerCAmelCase__ : Dict = TFDeiTModel(a , name='vision_model' )
lowerCAmelCase__ : List[Any] = TFRobertaModel(a , name='text_model' )
return vision_model, text_model
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = TFDeiTModelTester(self )
lowerCAmelCase__ : List[str] = TFRobertaModelTester(self )
lowerCAmelCase__ : str = vit_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : List[Any] = bert_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = vision_config_and_inputs
        (
            lowerCAmelCase__ ,
            lowerCAmelCase__ ,
            lowerCAmelCase__ ,
            lowerCAmelCase__ ,
            lowerCAmelCase__ ,
            lowerCAmelCase__ ,
            lowerCAmelCase__ ,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class A__ ( __magic_name__ , unittest.TestCase ):
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : int = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'Rocketknight1/tiny-random-clip-tf' , 'hf-internal-testing/tiny-random-bert' )
lowerCAmelCase__ : Dict = 13
lowerCAmelCase__ : str = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowerCAmelCase__ : List[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
lowerCAmelCase__ : Union[str, Any] = random_attention_mask([batch_size, 4] )
lowerCAmelCase__ : Optional[int] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _lowerCamelCase ( self : str , a : int , a : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = TFCLIPVisionModel(a , name='vision_model' )
lowerCAmelCase__ : List[str] = TFBertModel(a , name='text_model' )
return vision_model, text_model
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = TFCLIPVisionModelTester(self )
lowerCAmelCase__ : Union[str, Any] = TFBertModelTester(self )
lowerCAmelCase__ : Any = clip_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : Any = bert_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = vision_config_and_inputs
        (
            lowerCAmelCase__ ,
            lowerCAmelCase__ ,
            lowerCAmelCase__ ,
            lowerCAmelCase__ ,
            lowerCAmelCase__ ,
            lowerCAmelCase__ ,
            lowerCAmelCase__ ,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class A__ ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel.from_pretrained(
'clip-italian/clip-italian' , logit_scale_init_value=1.0 , from_pt=a )
lowerCAmelCase__ : List[Any] = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' )
lowerCAmelCase__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowerCAmelCase__ : Any = processor(
text=['una foto di un gatto', 'una foto di un cane'] , images=a , padding=a , return_tensors='np' )
lowerCAmelCase__ : Union[str, Any] = model(**a )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
lowerCAmelCase__ : List[str] = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , a , atol=1E-3 ) )
| 307
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
lowerCamelCase__ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCamelCase__ = {
"""vocab_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"""
),
"""google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""",
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"""
),
"""google/electra-base-generator""": (
"""https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"""
),
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"""
),
},
}
lowerCamelCase__ = {
"""google/electra-small-generator""": 512,
"""google/electra-base-generator""": 512,
"""google/electra-large-generator""": 512,
"""google/electra-small-discriminator""": 512,
"""google/electra-base-discriminator""": 512,
"""google/electra-large-discriminator""": 512,
}
lowerCamelCase__ = {
"""google/electra-small-generator""": {"""do_lower_case""": True},
"""google/electra-base-generator""": {"""do_lower_case""": True},
"""google/electra-large-generator""": {"""do_lower_case""": True},
"""google/electra-small-discriminator""": {"""do_lower_case""": True},
"""google/electra-base-discriminator""": {"""do_lower_case""": True},
"""google/electra-large-discriminator""": {"""do_lower_case""": True},
}
class A__ ( __magic_name__ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_INIT_CONFIGURATION
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ElectraTokenizer
def __init__( self : int , a : str=None , a : int=None , a : List[Any]=True , a : Union[str, Any]="[UNK]" , a : Dict="[SEP]" , a : Union[str, Any]="[PAD]" , a : str="[CLS]" , a : Optional[int]="[MASK]" , a : Union[str, Any]=True , a : Any=None , **a : List[str] , ):
'''simple docstring'''
super().__init__(
a , tokenizer_file=a , do_lower_case=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , tokenize_chinese_chars=a , strip_accents=a , **a , )
lowerCAmelCase__ : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , a ) != do_lower_case
or normalizer_state.get('strip_accents' , a ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , a ) != tokenize_chinese_chars
):
lowerCAmelCase__ : str = getattr(a , normalizer_state.pop('type' ) )
lowerCAmelCase__ : Optional[int] = do_lower_case
lowerCAmelCase__ : List[str] = strip_accents
lowerCAmelCase__ : Dict = tokenize_chinese_chars
lowerCAmelCase__ : List[Any] = normalizer_class(**a )
lowerCAmelCase__ : List[Any] = do_lower_case
def _lowerCamelCase ( self : Any , a : Tuple , a : Tuple=None ):
'''simple docstring'''
lowerCAmelCase__ : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowerCamelCase ( self : int , a : List[int] , a : Optional[List[int]] = None ):
'''simple docstring'''
lowerCAmelCase__ : Any = [self.sep_token_id]
lowerCAmelCase__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowerCamelCase ( self : Optional[Any] , a : str , a : Optional[str] = None ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self._tokenizer.model.save(a , name=a )
return tuple(a )
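# Illustrative sketch (an addition; the ids 101/102 are the usual BERT WordPiece values
# for [CLS]/[SEP], assumed here only for the example): the sequence-pair layout the two
# methods above produce, i.e. [CLS] A [SEP] B [SEP], with token_type_ids 0 over the first
# segment (including both surrounding special tokens) and 1 over the second.
def electra_pair_layout_sketch(ids_a: list, ids_b: list, cls_id: int = 101, sep_id: int = 102) -> tuple:
    input_ids = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]
    token_type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
    return input_ids, token_type_ids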
| 356
|
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
lowerCamelCase__ = logging.getLogger(__name__)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
lowerCAmelCase__ : Dict = np.argmax(SCREAMING_SNAKE_CASE_ , axis=1 )
return np.sum(outputs == labels )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
with open(SCREAMING_SNAKE_CASE_ , encoding='utf_8' ) as f:
lowerCAmelCase__ : Dict = csv.reader(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Dict = []
next(SCREAMING_SNAKE_CASE_ ) # skip the first line
for line in tqdm(SCREAMING_SNAKE_CASE_ ):
output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
lowerCAmelCase__ : Dict = []
for dataset in encoded_datasets:
lowerCAmelCase__ : List[str] = len(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
lowerCAmelCase__ : Tuple = np.zeros((n_batch, 2) , dtype=np.intaa )
lowerCAmelCase__ : List[Any] = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa )
lowerCAmelCase__ : Tuple = np.zeros((n_batch,) , dtype=np.intaa )
        for i, (story, conta, conta, mc_label) in enumerate(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : Tuple = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
lowerCAmelCase__ : Any = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
lowerCAmelCase__ : Optional[Any] = with_conta
lowerCAmelCase__ : List[str] = with_conta
lowerCAmelCase__ : List[Any] = len(SCREAMING_SNAKE_CASE_ ) - 1
lowerCAmelCase__ : Tuple = len(SCREAMING_SNAKE_CASE_ ) - 1
lowerCAmelCase__ : Tuple = with_conta
lowerCAmelCase__ : Optional[int] = with_conta
lowerCAmelCase__ : Optional[int] = mc_label
lowerCAmelCase__ : Dict = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(SCREAMING_SNAKE_CASE_ ) for t in all_inputs ) )
return tensor_datasets
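# Illustrative sketch (an addition, with hypothetical toy token ids): the per-choice
# layout assembled above for OpenAIGPTDoubleHeadsModel, i.e. [start] story [delimiter]
# continuation [classify], where mc_token_id points at the final classification token
# read by the multiple-choice head.
def rocstories_choice_layout_sketch(story_ids: list, cont_ids: list, start: int = 0, delim: int = 1, clf: int = 2) -> tuple:
    seq = [start] + story_ids + [delim] + cont_ids + [clf]
    mc_token_id = len(seq) - 1  # index of the [classify] token
    return seq, mc_token_id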
def lowerCAmelCase__ ( ) -> int:
lowerCAmelCase__ : int = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=SCREAMING_SNAKE_CASE_ , default='openai-gpt' , help='pretrained model name' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' )
parser.add_argument(
'--output_dir' , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument('--train_dataset' , type=SCREAMING_SNAKE_CASE_ , default='' )
parser.add_argument('--eval_dataset' , type=SCREAMING_SNAKE_CASE_ , default='' )
parser.add_argument('--seed' , type=SCREAMING_SNAKE_CASE_ , default=42 )
parser.add_argument('--num_train_epochs' , type=SCREAMING_SNAKE_CASE_ , default=3 )
parser.add_argument('--train_batch_size' , type=SCREAMING_SNAKE_CASE_ , default=8 )
parser.add_argument('--eval_batch_size' , type=SCREAMING_SNAKE_CASE_ , default=16 )
parser.add_argument('--adam_epsilon' , default=1e-8 , type=SCREAMING_SNAKE_CASE_ , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , type=SCREAMING_SNAKE_CASE_ , default=1 )
parser.add_argument(
'--max_steps' , default=-1 , type=SCREAMING_SNAKE_CASE_ , help=(
'If > 0: set total number of training steps to perform. Override num_train_epochs.'
) , )
parser.add_argument(
'--gradient_accumulation_steps' , type=SCREAMING_SNAKE_CASE_ , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--learning_rate' , type=SCREAMING_SNAKE_CASE_ , default=6.25e-5 )
parser.add_argument('--warmup_steps' , default=0 , type=SCREAMING_SNAKE_CASE_ , help='Linear warmup over warmup_steps.' )
parser.add_argument('--lr_schedule' , type=SCREAMING_SNAKE_CASE_ , default='warmup_linear' )
parser.add_argument('--weight_decay' , type=SCREAMING_SNAKE_CASE_ , default=0.01 )
parser.add_argument('--lm_coef' , type=SCREAMING_SNAKE_CASE_ , default=0.9 )
parser.add_argument('--n_valid' , type=SCREAMING_SNAKE_CASE_ , default=374 )
parser.add_argument('--server_ip' , type=SCREAMING_SNAKE_CASE_ , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=SCREAMING_SNAKE_CASE_ , default='' , help='Can be used for distant debugging.' )
lowerCAmelCase__ : List[str] = parser.parse_args()
print(SCREAMING_SNAKE_CASE_ )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE_ )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
lowerCAmelCase__ : str = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
lowerCAmelCase__ : Dict = torch.cuda.device_count()
logger.info('device: {}, n_gpu {}'.format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
    # This loading function also adds new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
lowerCAmelCase__ : Union[str, Any] = ['_start_', '_delimiter_', '_classify_']
lowerCAmelCase__ : Dict = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE_ ) )
model.to(SCREAMING_SNAKE_CASE_ )
# Load and encode the datasets
def tokenize_and_encode(SCREAMING_SNAKE_CASE_ ):
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return obj
return [tokenize_and_encode(SCREAMING_SNAKE_CASE_ ) for o in obj]
logger.info('Encoding dataset...' )
lowerCAmelCase__ : List[Any] = load_rocstories_dataset(args.train_dataset )
lowerCAmelCase__ : str = load_rocstories_dataset(args.eval_dataset )
lowerCAmelCase__ : Union[str, Any] = (train_dataset, eval_dataset)
lowerCAmelCase__ : List[str] = tokenize_and_encode(SCREAMING_SNAKE_CASE_ )
# Compute the max input length for the Transformer
lowerCAmelCase__ : Union[str, Any] = model.config.n_positions // 2 - 2
lowerCAmelCase__ : Tuple = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
lowerCAmelCase__ : Dict = min(SCREAMING_SNAKE_CASE_ , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
lowerCAmelCase__ : int = pre_process_datasets(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = tensor_datasets[0], tensor_datasets[1]
lowerCAmelCase__ : str = TensorDataset(*SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Tuple = RandomSampler(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Tuple = DataLoader(SCREAMING_SNAKE_CASE_ , sampler=SCREAMING_SNAKE_CASE_ , batch_size=args.train_batch_size )
lowerCAmelCase__ : Optional[Any] = TensorDataset(*SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Dict = SequentialSampler(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = DataLoader(SCREAMING_SNAKE_CASE_ , sampler=SCREAMING_SNAKE_CASE_ , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
lowerCAmelCase__ : Union[str, Any] = args.max_steps
lowerCAmelCase__ : int = args.max_steps // (len(SCREAMING_SNAKE_CASE_ ) // args.gradient_accumulation_steps) + 1
else:
lowerCAmelCase__ : Optional[Any] = len(SCREAMING_SNAKE_CASE_ ) // args.gradient_accumulation_steps * args.num_train_epochs
lowerCAmelCase__ : Optional[int] = list(model.named_parameters() )
lowerCAmelCase__ : Tuple = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
lowerCAmelCase__ : str = [
{
'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'weight_decay': args.weight_decay,
},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
]
lowerCAmelCase__ : Union[str, Any] = AdamW(SCREAMING_SNAKE_CASE_ , lr=args.learning_rate , eps=args.adam_epsilon )
lowerCAmelCase__ : int = get_linear_schedule_with_warmup(
SCREAMING_SNAKE_CASE_ , num_warmup_steps=args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE_ )
if args.do_train:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ):
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : int = 0
lowerCAmelCase__ : str = tqdm(SCREAMING_SNAKE_CASE_ , desc='Training' )
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : Union[str, Any] = tuple(t.to(SCREAMING_SNAKE_CASE_ ) for t in batch )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = batch
lowerCAmelCase__ : Tuple = model(SCREAMING_SNAKE_CASE_ , mc_token_ids=SCREAMING_SNAKE_CASE_ , lm_labels=SCREAMING_SNAKE_CASE_ , mc_labels=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : str = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
lowerCAmelCase__ : Optional[int] = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
lowerCAmelCase__ : List[str] = 'Training loss: {:.2e} lr: {:.2e}'.format(SCREAMING_SNAKE_CASE_ , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
lowerCAmelCase__ : Optional[int] = model.module if hasattr(SCREAMING_SNAKE_CASE_ , 'module' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
lowerCAmelCase__ : List[str] = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : List[str] = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE_ )
torch.save(model_to_save.state_dict() , SCREAMING_SNAKE_CASE_ )
model_to_save.config.to_json_file(SCREAMING_SNAKE_CASE_ )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
lowerCAmelCase__ : Dict = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
lowerCAmelCase__ : List[Any] = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(SCREAMING_SNAKE_CASE_ )
if args.do_eval:
model.eval()
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = 0, 0
lowerCAmelCase__ , lowerCAmelCase__ : Any = 0, 0
for batch in tqdm(SCREAMING_SNAKE_CASE_ , desc='Evaluating' ):
lowerCAmelCase__ : str = tuple(t.to(SCREAMING_SNAKE_CASE_ ) for t in batch )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Tuple = batch
with torch.no_grad():
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = model(
SCREAMING_SNAKE_CASE_ , mc_token_ids=SCREAMING_SNAKE_CASE_ , lm_labels=SCREAMING_SNAKE_CASE_ , mc_labels=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = mc_logits.detach().cpu().numpy()
lowerCAmelCase__ : List[Any] = mc_labels.to('cpu' ).numpy()
lowerCAmelCase__ : str = accuracy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
lowerCAmelCase__ : Optional[int] = eval_loss / nb_eval_steps
lowerCAmelCase__ : Any = eval_accuracy / nb_eval_examples
lowerCAmelCase__ : Union[str, Any] = tr_loss / nb_tr_steps if args.do_train else None
lowerCAmelCase__ : Tuple = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
lowerCAmelCase__ : Dict = os.path.join(args.output_dir , 'eval_results.txt' )
with open(SCREAMING_SNAKE_CASE_ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , SCREAMING_SNAKE_CASE_ , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 307
| 0
|
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
# "extended trapezoidal rule"
# int(f) = dx/2 * (f1 + 2f2 + ... + fn)
lowerCAmelCase__ : Any = (boundary[1] - boundary[0]) / steps
lowerCAmelCase__ : int = boundary[0]
lowerCAmelCase__ : Union[str, Any] = boundary[1]
lowerCAmelCase__ : Dict = make_points(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : List[Any] = 0.0
y += (h / 2.0) * f(SCREAMING_SNAKE_CASE_ )
for i in x_i:
# print(i)
y += h * f(SCREAMING_SNAKE_CASE_ )
y += (h / 2.0) * f(SCREAMING_SNAKE_CASE_ )
return y
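# Illustrative sketch (an addition): a self-contained composite trapezoidal rule with the
# textbook endpoint weighting, for comparison with the generator-based version above.
# For f(x) = x**2 on [0, 1] with h = 0.1, the error bound (b - a) * h**2 * max|f''| / 12
# predicts a deviation of at most about 1.7e-3 from the exact value 1/3.
def trapezoid_sketch(f, a: float, b: float, n: int) -> float:
    h = (b - a) / n
    total = 0.5 * (f(a) + f(b))  # endpoints carry half weight
    for i in range(1, n):
        total += f(a + i * h)  # interior points carry full weight
    return h * total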
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
lowerCAmelCase__ : Any = a + h
while x < (b - h):
yield x
lowerCAmelCase__ : int = x + h
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Any: # enter your function here
lowerCAmelCase__ : Any = (x - 0) * (x - 0)
return y
def lowerCAmelCase__ ( ) -> Any:
lowerCAmelCase__ : Tuple = 0.0 # Lower bound of integration
lowerCAmelCase__ : Dict = 1.0 # Upper bound of integration
lowerCAmelCase__ : List[Any] = 10.0 # define number of steps or resolution
lowerCAmelCase__ : str = [a, b] # define boundary of integration
lowerCAmelCase__ : Union[str, Any] = method_a(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
print(F'''y = {y}''' )
if __name__ == "__main__":
main()
| 357
|
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCamelCase__ = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> int:
require_version(deps[pkg] , SCREAMING_SNAKE_CASE_ )
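# Illustrative sketch (an addition, relying only on importlib.metadata from Python 3.8+
# and the `packaging` library): roughly what a runtime version check reduces to, namely
# reading the installed distribution's version and testing it against the requirement's
# specifier set. The requirement string in the comment is a hypothetical example.
def check_requirement_sketch(requirement: str) -> bool:
    from importlib.metadata import version
    from packaging.requirements import Requirement

    req = Requirement(requirement)  # e.g. "tqdm>=4.27"
    return req.specifier.contains(version(req.name), prereleases=True)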
| 307
| 0
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {
"""configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
"""feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
"""processing_mctct""": ["""MCTCTProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MCTCTForCTC""",
"""MCTCTModel""",
"""MCTCTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
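# Illustrative sketch (standalone, with a hypothetical attribute map): the PEP 562
# module-level __getattr__ idea behind _LazyModule, deferring the import of a submodule
# until one of its names is first accessed so that importing the package stays cheap.
def make_lazy_getattr_sketch(package: str, lazy_attrs: dict):
    import importlib

    def __getattr__(name):
        if name in lazy_attrs:
            module = importlib.import_module(lazy_attrs[name], package)
            return getattr(module, name)
        raise AttributeError(f"module {package!r} has no attribute {name!r}")

    return __getattr__

# Usage, inside a package __init__.py:
#     __getattr__ = make_lazy_getattr_sketch(__name__, {"MCTCTConfig": ".configuration_mctct"})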
| 358
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class A__ ( __magic_name__ ):
lowercase = (DPMSolverSDEScheduler,)
lowercase = 10
def _lowerCamelCase ( self : Optional[int] , **a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = {
'num_train_timesteps': 1_100,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'noise_sampler_seed': 0,
}
config.update(**a )
return config
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=a , beta_end=a )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=a )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = self.scheduler_classes[0]
lowerCAmelCase__ : Tuple = self.get_scheduler_config()
lowerCAmelCase__ : List[Any] = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ : Dict = self.dummy_model()
lowerCAmelCase__ : int = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ : int = sample.to(a )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : List[Any] = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : str = model(a , a )
lowerCAmelCase__ : int = scheduler.step(a , a , a )
lowerCAmelCase__ : Any = output.prev_sample
lowerCAmelCase__ : List[Any] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Optional[int] = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_6_7.4_7_8_2_1_0_4_4_9_2_1_8_7_5 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_7_8_7_0_5_9_6_4_5_6_5_2_7_7 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_1.5_9_3_5_2_1_1_1_8_1_6_4_0_6 ) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_6_8_9_2_2_9_9_6_5_2 ) < 1E-3
else:
assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.scheduler_classes[0]
lowerCAmelCase__ : List[str] = self.get_scheduler_config(prediction_type='v_prediction' )
lowerCAmelCase__ : Any = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ : Optional[int] = self.dummy_model()
lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ : Any = sample.to(a )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : str = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : str = model(a , a )
lowerCAmelCase__ : Dict = scheduler.step(a , a , a )
lowerCAmelCase__ : Tuple = output.prev_sample
lowerCAmelCase__ : int = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Union[str, Any] = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_2_4.7_7_1_4_9_2_0_0_4_3_9_4_5_3 ) < 1E-2
assert abs(result_mean.item() - 0.1_6_2_2_6_2_8_9_0_1_4_8_1_6_2_8_4 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_2_8.1_6_6_3_3_6_0_5_9_5_7_0_3 ) < 1E-2
assert abs(result_mean.item() - 0.1_6_6_8_8_3_2_6_0_0_1_1_6_7_2_9_7 ) < 1E-3
else:
assert abs(result_sum.item() - 1_1_9.8_4_8_7_5_4_8_8_2_8_1_2_5 ) < 1E-2
assert abs(result_mean.item() - 0.1_5_6_0_5_3_0_6_6_2_5_3_6_6_2_1 ) < 1E-3
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.scheduler_classes[0]
lowerCAmelCase__ : Optional[int] = self.get_scheduler_config()
lowerCAmelCase__ : int = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps , device=a )
lowerCAmelCase__ : Tuple = self.dummy_model()
lowerCAmelCase__ : Any = self.dummy_sample_deter.to(a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCAmelCase__ : Dict = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : Optional[int] = model(a , a )
lowerCAmelCase__ : Tuple = scheduler.step(a , a , a )
lowerCAmelCase__ : Dict = output.prev_sample
lowerCAmelCase__ : Union[str, Any] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Dict = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_6_7.4_6_9_5_7_3_9_7_4_6_0_9_3_8 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_8_0_5_9_3_4_6_0_7_9_8_2_6_3_5 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_1.5_9_3_5_3_6_3_7_6_9_5_3_1_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_8_3_8_2_4_1_5_7_7_1 ) < 1E-3
else:
assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.scheduler_classes[0]
lowerCAmelCase__ : Any = self.get_scheduler_config()
lowerCAmelCase__ : Any = scheduler_class(**a , use_karras_sigmas=a )
scheduler.set_timesteps(self.num_inference_steps , device=a )
lowerCAmelCase__ : str = self.dummy_model()
lowerCAmelCase__ : Any = self.dummy_sample_deter.to(a ) * scheduler.init_noise_sigma
lowerCAmelCase__ : str = sample.to(a )
for t in scheduler.timesteps:
lowerCAmelCase__ : Any = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : int = model(a , a )
lowerCAmelCase__ : Union[str, Any] = scheduler.step(a , a , a )
lowerCAmelCase__ : Union[str, Any] = output.prev_sample
lowerCAmelCase__ : Optional[int] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Any = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_7_6.6_6_9_7_4_1_3_5_7_4_2_1_8_8 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_7.6_3_6_5_3_5_6_4_4_5_3_1_2_5 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
else:
assert abs(result_sum.item() - 1_7_0.3_1_3_5_2_2_3_3_8_8_6_7_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
| 307
| 0
|
from __future__ import annotations
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> dict[str, float]:
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if resistance < 0:
raise ValueError('Resistance cannot be negative' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 359
|
import os
import string
import sys
lowerCamelCase__ = 1 << 8
lowerCamelCase__ = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
KEYMAP["""arrow_begin"""] = KEYMAP["""up"""]
KEYMAP["""arrow_end"""] = KEYMAP["""left"""]
if sys.platform == "win32":
lowerCamelCase__ = []
lowerCamelCase__ = {
b"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
b"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
lowerCamelCase__ = ord(str(i))
def lowerCAmelCase__ ( ) -> Dict:
if os.name == "nt":
import msvcrt
lowerCAmelCase__ : Dict = 'mbcs'
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(SCREAMING_SNAKE_CASE_ ) == 0:
# Read the keystroke
lowerCAmelCase__ : Optional[Any] = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
lowerCAmelCase__ : Dict = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
lowerCAmelCase__ : Dict = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) )
WIN_CH_BUFFER.append(SCREAMING_SNAKE_CASE_ )
if ord(SCREAMING_SNAKE_CASE_ ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
lowerCAmelCase__ : Dict = chr(KEYMAP['esc'] )
except KeyError:
lowerCAmelCase__ : Dict = cha[1]
else:
lowerCAmelCase__ : List[Any] = ch.decode(SCREAMING_SNAKE_CASE_ )
else:
lowerCAmelCase__ : Tuple = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
lowerCAmelCase__ : Tuple = sys.stdin.fileno()
lowerCAmelCase__ : Any = termios.tcgetattr(SCREAMING_SNAKE_CASE_ )
try:
tty.setraw(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = sys.stdin.read(1 )
finally:
termios.tcsetattr(SCREAMING_SNAKE_CASE_ , termios.TCSADRAIN , SCREAMING_SNAKE_CASE_ )
return ch
def get_character():
    """Gets a character from the keyboard and returns the key code"""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
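# Illustration of the ARROW_KEY_FLAG encoding above: terminal escape sequences
# for arrow keys end in 'A'..'D' (ord 65..68), and adding the flag bit (1 << 8)
# lifts the codes out of single-byte range so they cannot collide with
# printable characters.
assert KEYMAP["up"] == ord("A") + ARROW_KEY_FLAG == 321
assert chr(KEYMAP["up"] - ARROW_KEY_FLAG) == "A"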
| 307
| 0
|
"""simple docstring"""
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class A__ :
    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()
    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)
    def get_last_lr(self):
        return self.scheduler.get_last_lr()
    def state_dict(self):
        return self.scheduler.state_dict()
    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)
    def get_lr(self):
        return self.scheduler.get_lr()
    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
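# Hedged usage sketch: in practice this wrapper is built for you by
# `Accelerator.prepare` (real accelerate API); `scheduler.step()` then only
# advances when the wrapped optimizer actually stepped. Kept as a comment here
# because importing `accelerate` inside this module would be circular; the
# dummy model/optimizer below are illustrative only.
#
#     import torch
#     from accelerate import Accelerator
#
#     accelerator = Accelerator()
#     model = torch.nn.Linear(4, 2)
#     optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
#     scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
#     model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
#     scheduler.step()  # safe even when the optimizer step was skipped (e.g. fp16 overflow)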
| 360
|
def base16_encode(data: bytes) -> str:
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])
def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set('0123456789ABCDEF' ):
        raise ValueError(
            'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
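# Round-trip sketch for the helpers above, cross-checked against the stdlib
# implementation of the same RFC 3548 base16 alphabet:
import base64

_payload = b"Hello World!"
_encoded = base16_encode(_payload)
assert _encoded == base64.b16encode(_payload).decode("ascii") == "48656C6C6F20576F726C6421"
assert base16_decode(_encoded) == _payload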
| 307
| 0
|
import random
def _partition(data: list, pivot) -> tuple:
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater
def quick_select(items: list, index: int):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)
    # invalid input
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
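# Usage sketch: quick_select(items, k) returns the k-th smallest element
# (0-indexed) in O(n) expected time; k = len(items) // 2 selects the median.
_data = [2, 4, 5, 7, 899, 54, 32]
assert quick_select(_data, 0) == 2  # minimum
assert quick_select(_data, len(_data) // 2) == 7  # median of the seven items
assert quick_select(_data, len(_data)) is None  # out-of-range index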
| 361
|
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result
def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()
def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
print_all_state(total_list)
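# Hedged cross-check: `create_all_state` enumerates the k-combinations of 1..n
# in lexicographic order, which matches itertools.combinations over the same
# range.
from itertools import combinations as _combinations

assert generate_all_combinations(4, 2) == [list(_c) for _c in _combinations(range(1, 5), 2)]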
| 307
| 0
|
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = 'hf-internal-testing/tiny-random-t5'
lowerCAmelCase__ : Tuple = AutoTokenizer.from_pretrained(a )
lowerCAmelCase__ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(a )
lowerCAmelCase__ : Optional[Any] = tokenizer('This is me' , return_tensors='pt' )
lowerCAmelCase__ : Any = model.to_bettertransformer()
self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
lowerCAmelCase__ : str = model.generate(**a )
lowerCAmelCase__ : Tuple = model.reverse_bettertransformer()
self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a )
lowerCAmelCase__ : int = AutoModelForSeqaSeqLM.from_pretrained(a )
self.assertFalse(
any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
lowerCAmelCase__ : List[str] = model_reloaded.generate(**a )
self.assertTrue(torch.allclose(a , a ) )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : str = 'hf-internal-testing/tiny-random-t5'
lowerCAmelCase__ : int = AutoModelForSeqaSeqLM.from_pretrained(a )
lowerCAmelCase__ : int = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(a ):
model.save_pretrained(a )
lowerCAmelCase__ : str = model.reverse_bettertransformer()
model.save_pretrained(a )
| 362
|
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class A__ ( unittest.TestCase ):
@parameterized.expand([(None,), ('foo.json',)] )
def _lowerCamelCase ( self : Dict , a : str ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a , config_name=a )
lowerCAmelCase__ : Tuple = GenerationConfig.from_pretrained(a , config_name=a )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , a )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Dict = AutoConfig.from_pretrained('gpt2' )
lowerCAmelCase__ : Any = GenerationConfig.from_model_config(a )
lowerCAmelCase__ : Any = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(a , a )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = GenerationConfig()
lowerCAmelCase__ : Dict = {
'max_new_tokens': 1_024,
'foo': 'bar',
}
lowerCAmelCase__ : List[Any] = copy.deepcopy(a )
lowerCAmelCase__ : Dict = generation_config.update(**a )
# update_kwargs was not modified (no side effects)
self.assertEqual(a , a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(a , {'foo': 'bar'} )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = GenerationConfig()
lowerCAmelCase__ : List[Any] = 'bar'
with tempfile.TemporaryDirectory('test-generation-config' ) as tmp_dir:
generation_config.save_pretrained(a )
lowerCAmelCase__ : List[Any] = GenerationConfig.from_pretrained(a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , 'bar' )
lowerCAmelCase__ : int = GenerationConfig.from_model_config(a )
assert not hasattr(a , 'foo' ) # no new kwargs should be initialized if from config
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , a )
self.assertEqual(default_config.num_beams , 1 )
lowerCAmelCase__ : List[Any] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , a )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a )
lowerCAmelCase__ : Any = GenerationConfig.from_pretrained(a , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , a )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class A__ ( unittest.TestCase ):
@classmethod
def _lowerCamelCase ( cls : int ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = TOKEN
HfFolder.save_token(a )
@classmethod
def _lowerCamelCase ( cls : Optional[int] ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='test-generation-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-generation-config-org' )
except HTTPError:
pass
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('test-generation-config' , use_auth_token=self._token )
lowerCAmelCase__ : Any = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-generation-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
a , repo_id='test-generation-config' , push_to_hub=a , use_auth_token=self._token )
lowerCAmelCase__ : Tuple = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('valid_org/test-generation-config-org' , use_auth_token=self._token )
lowerCAmelCase__ : Dict = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-generation-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
a , repo_id='valid_org/test-generation-config-org' , push_to_hub=a , use_auth_token=self._token )
lowerCAmelCase__ : List[str] = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
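# Hedged round-trip sketch of the save/load cycle the tests above exercise,
# using the public GenerationConfig API already imported in this file (values
# are illustrative):
_config = GenerationConfig(do_sample=True, temperature=0.7)
with tempfile.TemporaryDirectory() as _tmp_dir:
    _config.save_pretrained(_tmp_dir)
    assert GenerationConfig.from_pretrained(_tmp_dir).temperature == 0.7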
| 307
| 0
|
from math import isclose, sqrt
def next_point(
    point_x: float, point_y: float, incoming_gradient: float
) -> tuple[float, float, float]:
    # gradient of the normal at (point_x, point_y) on the ellipse 4x^2 + y^2 = 100
    normal_gradient = point_y / 4 / point_x
    # sa and ca are sin(2*theta) and cos(2*theta), where tan(theta) = normal_gradient
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_minus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
print(F"""{solution() = }""")
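# Geometry note (hedged): on the ellipse 4x^2 + y^2 = 100, implicit
# differentiation gives a tangent slope of -4x/y, so the normal slope is
# y/(4x) -- the `point_y / 4 / point_x` term in `next_point`. The `sa`/`ca`
# intermediates are sin(2*theta) and cos(2*theta) for tan(theta) = normal
# slope, via the identities checked here:
_n = 1.0  # a normal slope of 1 corresponds to theta = 45 degrees
assert isclose(2 * _n / (1 + _n * _n), 1.0)  # sin(2*theta) = 1
assert isclose((1 - _n * _n) / (1 + _n * _n), 0.0)  # cos(2*theta) = 0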
| 363
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        'generator',
        'return_dict',
        'decoder_num_inference_steps',
        'super_res_num_inference_steps',
    ]
    test_xformers_attention = False
@property
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
return self.time_input_dim
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return 100
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(a )
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : List[Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(a )
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Union[str, Any] = {
'clip_embeddings_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'cross_attention_dim': self.cross_attention_dim,
}
lowerCAmelCase__ : Optional[Any] = UnCLIPTextProjModel(**a )
return model
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : str = {
'sample_size': 32,
# RGB in channels
'in_channels': 3,
            # Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 6,
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': 'identity',
}
lowerCAmelCase__ : str = UNetaDConditionModel(**a )
return model
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Any = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
torch.manual_seed(1 )
lowerCAmelCase__ : List[str] = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.dummy_decoder
lowerCAmelCase__ : Optional[int] = self.dummy_text_proj
lowerCAmelCase__ : Any = self.dummy_text_encoder
lowerCAmelCase__ : Any = self.dummy_tokenizer
lowerCAmelCase__ : Any = self.dummy_super_res_first
lowerCAmelCase__ : Optional[int] = self.dummy_super_res_last
lowerCAmelCase__ : Dict = UnCLIPScheduler(
variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
lowerCAmelCase__ : Any = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
lowerCAmelCase__ : Any = CLIPImageProcessor(crop_size=32 , size=32 )
lowerCAmelCase__ : Optional[int] = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def _lowerCamelCase ( self : Any , a : Dict , a : List[str]=0 , a : List[str]=True ):
'''simple docstring'''
lowerCAmelCase__ : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(a ) ).to(a )
if str(a ).startswith('mps' ):
lowerCAmelCase__ : Optional[int] = torch.manual_seed(a )
else:
lowerCAmelCase__ : str = torch.Generator(device=a ).manual_seed(a )
if pil_image:
lowerCAmelCase__ : Optional[int] = input_image * 0.5 + 0.5
lowerCAmelCase__ : Dict = input_image.clamp(0 , 1 )
lowerCAmelCase__ : List[Any] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowerCAmelCase__ : Union[str, Any] = DiffusionPipeline.numpy_to_pil(a )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = 'cpu'
lowerCAmelCase__ : Any = self.get_dummy_components()
lowerCAmelCase__ : List[str] = self.pipeline_class(**a )
lowerCAmelCase__ : Dict = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Dict = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : str = pipe(**a )
lowerCAmelCase__ : Optional[Any] = output.images
lowerCAmelCase__ : str = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Optional[int] = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
        lowerCAmelCase__ : List[str] = np.array(
            [0.9997, 0.0002, 0.9997, 0.9997, 0.9969, 0.0023, 0.9997, 0.9969, 0.9970]
        )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = 'cpu'
lowerCAmelCase__ : Dict = self.get_dummy_components()
lowerCAmelCase__ : Optional[int] = self.pipeline_class(**a )
lowerCAmelCase__ : int = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : List[str] = pipe(**a )
lowerCAmelCase__ : Union[str, Any] = output.images
lowerCAmelCase__ : int = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : int = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
        lowerCAmelCase__ : str = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = 'cpu'
lowerCAmelCase__ : int = self.get_dummy_components()
lowerCAmelCase__ : Tuple = self.pipeline_class(**a )
lowerCAmelCase__ : Union[str, Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Tuple = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : List[str] = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
lowerCAmelCase__ : Optional[int] = pipe(**a )
lowerCAmelCase__ : Tuple = output.images
lowerCAmelCase__ : List[str] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Union[str, Any] = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
lowerCAmelCase__ : str = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
        lowerCAmelCase__ : Union[str, Any] = np.array(
            [0.9997, 0.9989, 0.0008, 0.0021, 0.9960, 0.0018, 0.0014, 0.0002, 0.9933]
        )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch.device('cpu' )
        class DummyScheduler:
            init_noise_sigma = 1
lowerCAmelCase__ : Optional[Any] = self.get_dummy_components()
lowerCAmelCase__ : Dict = self.pipeline_class(**a )
lowerCAmelCase__ : Optional[Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Optional[int] = torch.Generator(device=a ).manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe.decoder.dtype
lowerCAmelCase__ : Union[str, Any] = 1
lowerCAmelCase__ : str = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
lowerCAmelCase__ : List[Any] = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
lowerCAmelCase__ : List[str] = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
lowerCAmelCase__ : Any = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Optional[int] = pipe(
**a , decoder_latents=a , super_res_latents=a ).images
lowerCAmelCase__ : Optional[Any] = self.get_dummy_inputs(a , pil_image=a )
# Don't pass image, instead pass embedding
lowerCAmelCase__ : Union[str, Any] = pipeline_inputs.pop('image' )
lowerCAmelCase__ : Union[str, Any] = pipe.image_encoder(a ).image_embeds
lowerCAmelCase__ : List[Any] = pipe(
**a , decoder_latents=a , super_res_latents=a , image_embeddings=a , ).images
        # make sure passing image embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1E-4
@skip_mps
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch_device == 'cpu'
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
lowerCAmelCase__ : int = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=a , expected_max_diff=a )
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = torch_device == 'cpu'
lowerCAmelCase__ : Any = True
lowerCAmelCase__ : Optional[Any] = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=a , relax_max_difference=a , additional_params_copy_to_batched_inputs=a , )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
lowerCAmelCase__ : List[str] = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=a , additional_params_copy_to_batched_inputs=a , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=a )
@skip_mps
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' )
lowerCAmelCase__ : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/unclip/karlo_v1_alpha_cat_variation_fp16.npy' )
lowerCAmelCase__ : Tuple = UnCLIPImageVariationPipeline.from_pretrained(
'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.floataa )
lowerCAmelCase__ : Union[str, Any] = pipeline.to(a )
pipeline.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Dict = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase__ : List[str] = pipeline(
a , generator=a , output_type='np' , )
lowerCAmelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(a , a , 15 )
| 307
| 0
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    order = 1
    @register_to_config
    def __init__(self, num_train_timesteps: int = 2_000, beta_min: float = 0.1, beta_max: float = 20, sampling_eps: float = 1E-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)
    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std
        # compute the reverse-time SDE drift and take one Euler-Maruyama step
        dt = -1.0 / len(self.timesteps)
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise
        return x, x_mean
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
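# Hedged scalar sanity check of the Euler-Maruyama update in `step_pred` above:
# for the VP SDE the drift is f = -0.5 * beta(t) * x and the diffusion is
# g = sqrt(beta(t)), and the reverse-time mean update is
# x_mean = x + (f - g**2 * score) * dt with dt < 0. Toy values only; the real
# method also rescales the score by the marginal std first.
_beta_t, _x, _score, _dt = 0.1, 1.0, -1.0, -1.0 / 2_000
_drift = -0.5 * _beta_t * _x
_diffusion = math.sqrt(_beta_t)
_x_mean = _x + (_drift - _diffusion**2 * _score) * _dt
assert math.isclose(_x_mean, 1.0 - 0.05 / 2_000)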
| 364
|
def stooge_sort(arr: list) -> list:
    stooge(arr, 0, len(arr) - 1)
    return arr
def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort first 2/3 elements
        stooge(arr, i, h - t)
        # Recursively sort last 2/3 elements
        stooge(arr, i + t, h)
        # Recursively sort first 2/3 elements
        stooge(arr, i, h - t)
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
print(stooge_sort(unsorted))
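# Hedged property check: stooge sort must agree with the built-in sort on any
# input (it runs in O(n^(log 3 / log 1.5)) ~ O(n^2.71), so keep inputs small).
import random as _random

_sample = _random.sample(range(1_000), 12)
assert stooge_sort(list(_sample)) == sorted(_sample)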
| 307
| 0
|
def solution(max_base: int = 10, max_power: int = 22) -> int:
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
if __name__ == "__main__":
print(F"""{solution(10, 22) = }""")
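# Worked check of the bounds used above (hedged): an n-digit n-th power needs a
# base below 10, since 10**n already has n + 1 digits; 9**21 is the largest
# case with exactly 21 digits, so the power search stops short of 22.
assert len(str(10**3)) == 4  # bases of 10 or more always overshoot
assert len(str(9**21)) == 21
assert len(str(9**22)) < 22  # no base reaches 22 digits at power 22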
| 365
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
"""processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""Speech2TextTokenizer"""]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""Speech2TextFeatureExtractor"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_speech_to_text"""] = [
"""TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSpeech2TextForConditionalGeneration""",
"""TFSpeech2TextModel""",
"""TFSpeech2TextPreTrainedModel""",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_speech_to_text"""] = [
"""SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Speech2TextForConditionalGeneration""",
"""Speech2TextModel""",
"""Speech2TextPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
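# Hedged stdlib analogue of the _LazyModule indirection above: the module is
# registered immediately, but its code only runs on first attribute access
# (importlib.util.LazyLoader, per the CPython docs recipe).
import importlib.util
import sys as _sys

def _lazy_import(name: str):
    spec = importlib.util.find_spec(name)
    loader = importlib.util.LazyLoader(spec.loader)
    spec.loader = loader
    module = importlib.util.module_from_spec(spec)
    _sys.modules[name] = module
    loader.exec_module(module)  # defers real execution until first attribute access
    return module

_json = _lazy_import("json")  # nothing has executed yet
assert _json.dumps({"ok": True}) == '{"ok": true}'  # first access triggers the load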
| 307
| 0
|
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
# Load configuration defined in the metadata file
with open(SCREAMING_SNAKE_CASE_ ) as metadata_file:
lowerCAmelCase__ : List[Any] = json.load(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : List[Any] = LukeConfig(use_entity_aware_attention=SCREAMING_SNAKE_CASE_ , **metadata['model_config'] )
# Load in the weights from the checkpoint_path
lowerCAmelCase__ : Optional[int] = torch.load(SCREAMING_SNAKE_CASE_ , map_location='cpu' )['module']
# Load the entity vocab file
lowerCAmelCase__ : Optional[Any] = load_original_entity_vocab(SCREAMING_SNAKE_CASE_ )
# add an entry for [MASK2]
lowerCAmelCase__ : Any = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
lowerCAmelCase__ : Optional[Any] = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
# Add special tokens to the token vocabulary for downstream tasks
lowerCAmelCase__ : Optional[int] = AddedToken('<ent>' , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : List[str] = AddedToken('<ent2>' , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ )
tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
with open(os.path.join(SCREAMING_SNAKE_CASE_ , 'tokenizer_config.json' ) , 'r' ) as f:
lowerCAmelCase__ : Dict = json.load(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Dict = 'MLukeTokenizer'
with open(os.path.join(SCREAMING_SNAKE_CASE_ , 'tokenizer_config.json' ) , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
with open(os.path.join(SCREAMING_SNAKE_CASE_ , MLukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : str = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Initialize the embeddings of the special tokens
lowerCAmelCase__ : Optional[int] = tokenizer.convert_tokens_to_ids(['@'] )[0]
lowerCAmelCase__ : Dict = tokenizer.convert_tokens_to_ids(['#'] )[0]
lowerCAmelCase__ : Optional[int] = state_dict['embeddings.word_embeddings.weight']
lowerCAmelCase__ : Optional[int] = word_emb[ent_init_index].unsqueeze(0 )
lowerCAmelCase__ : List[Any] = word_emb[enta_init_index].unsqueeze(0 )
lowerCAmelCase__ : Optional[int] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
lowerCAmelCase__ : Union[str, Any] = state_dict[bias_name]
lowerCAmelCase__ : Union[str, Any] = decoder_bias[ent_init_index].unsqueeze(0 )
lowerCAmelCase__ : Dict = decoder_bias[enta_init_index].unsqueeze(0 )
lowerCAmelCase__ : Tuple = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
lowerCAmelCase__ : List[Any] = F'''encoder.layer.{layer_index}.attention.self.'''
lowerCAmelCase__ : Any = state_dict[prefix + matrix_name]
lowerCAmelCase__ : Optional[Any] = state_dict[prefix + matrix_name]
lowerCAmelCase__ : str = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
lowerCAmelCase__ : int = state_dict['entity_embeddings.entity_embeddings.weight']
lowerCAmelCase__ : Tuple = entity_emb[entity_vocab['[MASK]']].unsqueeze(0 )
lowerCAmelCase__ : Tuple = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
lowerCAmelCase__ : List[str] = state_dict['entity_predictions.bias']
lowerCAmelCase__ : Optional[Any] = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0 )
lowerCAmelCase__ : Optional[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
lowerCAmelCase__ : Optional[int] = LukeForMaskedLM(config=SCREAMING_SNAKE_CASE_ ).eval()
state_dict.pop('entity_predictions.decoder.weight' )
state_dict.pop('lm_head.decoder.weight' )
state_dict.pop('lm_head.decoder.bias' )
lowerCAmelCase__ : str = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('lm_head' ) or key.startswith('entity_predictions' )):
lowerCAmelCase__ : str = state_dict[key]
else:
lowerCAmelCase__ : Tuple = state_dict[key]
lowerCAmelCase__ : Optional[Any] = model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
if set(SCREAMING_SNAKE_CASE_ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(SCREAMING_SNAKE_CASE_ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
lowerCAmelCase__ : Any = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ , task='entity_classification' )
lowerCAmelCase__ : Tuple = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'
lowerCAmelCase__ : Tuple = (0, 9)
lowerCAmelCase__ : Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE_ , entity_spans=[span] , return_tensors='pt' )
lowerCAmelCase__ : List[Any] = model(**SCREAMING_SNAKE_CASE_ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
lowerCAmelCase__ : Union[str, Any] = torch.Size((1, 33, 768) )
lowerCAmelCase__ : Tuple = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
lowerCAmelCase__ : List[Any] = torch.Size((1, 1, 768) )
lowerCAmelCase__ : Dict = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
lowerCAmelCase__ : int = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : str = 'Tokyo is the capital of <mask>.'
lowerCAmelCase__ : str = (24, 30)
lowerCAmelCase__ : Optional[int] = tokenizer(SCREAMING_SNAKE_CASE_ , entity_spans=[span] , return_tensors='pt' )
lowerCAmelCase__ : str = model(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[Any] = encoding['input_ids'][0].tolist()
lowerCAmelCase__ : Tuple = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>' ) )
lowerCAmelCase__ : List[str] = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : List[Any] = outputs.entity_logits[0][0].argmax().item()
lowerCAmelCase__ : str = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('en:' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('Saving PyTorch model to {}'.format(SCREAMING_SNAKE_CASE_ ) )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ['[MASK]', '[PAD]', '[UNK]']
    data = [json.loads(line) for line in open(entity_vocab_path)]
    new_mapping = {}
    for entry in data:
        entity_id = entry['id']
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f'''{language}:{entity_name}'''] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
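# Illustration of the JSON-lines entity-vocab format parsed by
# `load_original_entity_vocab` above; the example entry is assumed, for shape
# only (each line carries an id plus [name, language] pairs):
_entry = json.loads('{"id": 3, "entities": [["Japan", "en"], ["日本", "ja"]]}')
assert {f"{_lang}:{_name}" for _name, _lang in _entry["entities"]} == {"en:Japan", "ja:日本"}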
| 366
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = {
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
}
}
lowerCAmelCase__ : int = {
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 128,
'task_specific_params.summarization.min_length': 12,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 142,
'task_specific_params.summarization_cnn.min_length': 56,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 62,
'task_specific_params.summarization_xsum.min_length': 11,
'task_specific_params.summarization_xsum.num_beams': 6,
}
self.assertEqual(flatten_dict(a ) , a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(a ) , x.transpose() ) )
lowerCAmelCase__ : List[str] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 )
lowerCAmelCase__ : List[Any] = torch.tensor(a )
self.assertTrue(np.allclose(transpose(a ) , transpose(a ).numpy() ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : int = torch.tensor(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , transpose(a , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(transpose(a ) , transpose(a ).numpy() ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : Dict = tf.constant(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , transpose(a , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = np.random.randn(3 , 4 )
lowerCAmelCase__ : int = jnp.array(a )
self.assertTrue(np.allclose(transpose(a ) , np.asarray(transpose(a ) ) ) )
lowerCAmelCase__ : Any = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : str = jnp.array(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , np.asarray(transpose(a , axes=(1, 2, 0) ) ) ) )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , np.reshape(a , (4, 3) ) ) )
lowerCAmelCase__ : Tuple = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , np.reshape(a , (12, 5) ) ) )
@require_torch
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = np.random.randn(3 , 4 )
lowerCAmelCase__ : Dict = torch.tensor(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , reshape(a , (4, 3) ).numpy() ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : str = torch.tensor(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , reshape(a , (12, 5) ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
lowerCAmelCase__ : List[Any] = tf.constant(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , reshape(a , (4, 3) ).numpy() ) )
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , reshape(a , (12, 5) ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
lowerCAmelCase__ : List[str] = jnp.array(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , np.asarray(reshape(a , (4, 3) ) ) ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : Union[str, Any] = jnp.array(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , np.asarray(reshape(a , (12, 5) ) ) ) )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(a ) , np.squeeze(a ) ) )
lowerCAmelCase__ : int = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , np.squeeze(a , axis=2 ) ) )
@require_torch
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase__ : str = torch.tensor(a )
self.assertTrue(np.allclose(squeeze(a ) , squeeze(a ).numpy() ) )
lowerCAmelCase__ : Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase__ : Dict = torch.tensor(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , squeeze(a , axis=2 ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(squeeze(a ) , squeeze(a ).numpy() ) )
lowerCAmelCase__ : int = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase__ : str = tf.constant(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , squeeze(a , axis=2 ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase__ : Union[str, Any] = jnp.array(a )
self.assertTrue(np.allclose(squeeze(a ) , np.asarray(squeeze(a ) ) ) )
lowerCAmelCase__ : str = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase__ : Optional[Any] = jnp.array(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , np.asarray(squeeze(a , axis=2 ) ) ) )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , np.expand_dims(a , axis=1 ) ) )
@require_torch
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : str = np.random.randn(3 , 4 )
lowerCAmelCase__ : str = torch.tensor(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , expand_dims(a , axis=1 ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = np.random.randn(3 , 4 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , expand_dims(a , axis=1 ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = np.random.randn(3 , 4 )
lowerCAmelCase__ : Tuple = jnp.array(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , np.asarray(expand_dims(a , axis=1 ) ) ) )
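# Hedged numpy-only illustration of what the framework-agnostic helpers
# exercised above reduce to for plain arrays (the torch/tf/jax branches behave
# the same way on their own tensor types):
_x = np.random.randn(3, 4)
assert np.allclose(transpose(_x), _x.T)
assert reshape(_x, (4, 3)).shape == (4, 3)
assert expand_dims(_x, axis=1).shape == (3, 1, 4)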
| 307
| 0
|
import os
import pytest
from attr import dataclass
lowerCamelCase__ = """us-east-1""" # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
    hyperparameters = {
'task_name': 'mnli',
'per_device_train_batch_size': 16,
'per_device_eval_batch_size': 16,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 500,
'save_steps': 5500,
}
    distributed_hyperparameters = {**hyperparameters, 'max_steps': 1000}
@property
def _lowerCamelCase ( self : str ):
'''simple docstring'''
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
        return f'''{self.framework}-transformers-test'''
@property
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return f'''./tests/sagemaker/scripts/{self.framework}'''
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
| 367
|
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
lowerCamelCase__ = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
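
# --- Illustrative sketch (added for exposition, not part of the original module) ---
# The import-time guards above rely on `packaging.version` for PEP 440-aware
# comparisons; naive string comparison would order "3.10" before "3.7".
from packaging import version as _version

assert _version.parse('3.10') > _version.parse('3.7')  # semantic comparison
assert '3.10' < '3.7'                                  # lexicographic comparison misleads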
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=['idx', 'sentence1', 'sentence2'],
        )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding='longest',
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors='pt',
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
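
# --- Illustrative sketch (added for exposition, not part of the original script) ---
# `pad_to_multiple_of` above rounds the padded length up to the next multiple
# (8 for fp16/bf16, 16 for fp8), keeping shapes friendly to tensor cores.
# `_padded_length` is a hypothetical helper showing the rounding arithmetic:
def _padded_length(longest: int, multiple: int) -> int:
    return ((longest + multiple - 1) // multiple) * multiple

assert _padded_length(37, 8) == 40
assert _padded_length(40, 8) == 40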
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config['num_epochs'] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch['labels']))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}:', eval_metric)
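
# --- Illustrative sketch (added for exposition, not part of the original script) ---
# The gradient-accumulation fallback in `training_function` keeps the *effective*
# batch size constant: micro-batches shrink to MAX_GPU_BATCH_SIZE while the
# optimizer steps once per `gradient_accumulation_steps` micro-batches.
# `_effective_batch_size` is a hypothetical helper showing the arithmetic:
def _effective_batch_size(requested: int, max_per_step: int) -> int:
    steps = requested // max_per_step if requested > max_per_step else 1
    return min(requested, max_per_step) * steps

assert _effective_batch_size(64, 16) == 64  # 4 micro-batches of 16
assert _effective_batch_size(8, 16) == 8    # no accumulation needed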
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision',
        type=str,
        default=None,
        choices=['no', 'fp16', 'bf16', 'fp8'],
        help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.',
    )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
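
# --- Illustrative sketch (added for exposition, not part of the original script) ---
# The manual truncation in the eval loop mimics what `Accelerator.gather_for_metrics`
# does internally: drop the duplicate samples that distributed samplers append so
# that every process sees full batches. `_truncate_gathered` is a hypothetical helper:
def _truncate_gathered(gathered, dataset_len, samples_seen):
    # On the last batch, keep only the samples that have not been counted yet.
    return gathered[: dataset_len - samples_seen]

assert _truncate_gathered([10, 11, 12, 13], dataset_len=10, samples_seen=8) == [10, 11]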
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        'callback',
        'latents',
        'callback_steps',
        'output_type',
        'num_images_per_prompt',
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16_000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type='fourier',
            mid_block_type='UNetMidBlock1D',
            down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D'),
            up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip'),
        )
        scheduler = IPNDMScheduler()
        components = {
            'unet': unet,
            'scheduler': scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'batch_size': 1,
            'generator': generator,
            'num_inference_steps': 4,
        }
        return inputs
    def test_dance_diffusion(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, components['unet'].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dance_diffusion(self):
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k')
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
    def test_dance_diffusion_fp16(self):
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k', torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
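
# --- Illustrative sketch (added for exposition, not part of the original tests) ---
# `audio_length_in_s` maps to a sample count through the sample rate (16 kHz for
# the dummy UNet above); 4.096 s is 65_536 samples, a power of two that a 1D
# UNet's down/up blocks can halve and double cleanly.
_SAMPLE_RATE = 16_000
assert int(4.096 * _SAMPLE_RATE) == 65_536 == 2 ** 16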
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32,
                 num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
                 type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
                 num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size, hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range,
        )
    def create_and_check_mobilebert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': MobileBertModel,
            'fill-mask': MobileBertForMaskedLM,
            'question-answering': MobileBertForQuestionAnswering,
            'text-classification': MobileBertForSequenceClassification,
            'token-classification': MobileBertForTokenClassification,
            'zero-shot': MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fp16 = True

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict['next_sentence_label'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
@slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained('google/mobilebert-uncased').to(torch_device)
        input_ids = _long_tensor([[101, 7_110, 1_005, 1_056, 2_023, 11_333, 17_413, 1_029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
        self.assertTrue(lower_bound and upper_bound)
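
# --- Illustrative sketch (added for exposition, not part of the original tests) ---
# The bound check above is a relative-error test: with activations spanning ~1e0
# to ~1e8 an absolute tolerance is meaningless, so the ratio is pinned near 1.
# `_relatively_close` is a hypothetical helper showing the same check:
if is_torch_available():
    def _relatively_close(expected, actual, tol):
        ratio = expected / actual
        return bool(torch.all(ratio >= 1 - tol) and torch.all(ratio <= 1 + tol))

    assert _relatively_close(torch.tensor([1e8, 2.0]), torch.tensor([1.0001e8, 2.0001]), tol=1e-3)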
from ..utils import DummyObject, requires_backends
class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ['torch', 'transformers', 'onnx']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])


class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ['torch', 'transformers', 'onnx']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])


class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject):
    _backends = ['torch', 'transformers', 'onnx']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])


class OnnxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ['torch', 'transformers', 'onnx']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])


class OnnxStableDiffusionUpscalePipeline(metaclass=DummyObject):
    _backends = ['torch', 'transformers', 'onnx']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])


class StableDiffusionOnnxPipeline(metaclass=DummyObject):
    _backends = ['torch', 'transformers', 'onnx']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])
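
# --- Illustrative sketch (added for exposition, not part of the original module) ---
# A minimal reimplementation of the pattern above, with hypothetical names: the
# metaclass intercepts any attribute access and raises a helpful ImportError, so
# importing the library never fails -- only *using* a missing-backend object does.
class _DummyMeta(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires backends: torch, transformers, onnx")

class _OnnxPipelinePlaceholder(metaclass=_DummyMeta):
    def __init__(self, *args, **kwargs):
        raise ImportError("_OnnxPipelinePlaceholder requires backends: torch, transformers, onnx")

try:
    _OnnxPipelinePlaceholder.from_pretrained("some/repo")
except ImportError:
    pass  # raised lazily, at use time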
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1_024, 1_024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = 'huggingface/label-files'
        filename = 'ade20k-id2label.json'
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset')), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('pretrained.model', 'dpt.encoder')
    if "pretrained.model" in name:
        name = name.replace('pretrained.model', 'dpt.embeddings')
    if "patch_embed" in name:
        name = name.replace('patch_embed', 'patch_embeddings')
    if "pos_embed" in name:
        name = name.replace('pos_embed', 'position_embeddings')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "proj" in name and "project" not in name:
        name = name.replace('proj', 'projection')
    if "blocks" in name:
        name = name.replace('blocks', 'layer')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "scratch.output_conv" in name:
        name = name.replace('scratch.output_conv', 'head')
    if "scratch" in name:
        name = name.replace('scratch', 'neck')
    if "layer1_rn" in name:
        name = name.replace('layer1_rn', 'convs.0')
    if "layer2_rn" in name:
        name = name.replace('layer2_rn', 'convs.1')
    if "layer3_rn" in name:
        name = name.replace('layer3_rn', 'convs.2')
    if "layer4_rn" in name:
        name = name.replace('layer4_rn', 'convs.3')
    if "refinenet" in name:
        layer_idx = int(name[len('neck.refinenet') : len('neck.refinenet') + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f'refinenet{layer_idx}', f'fusion_stage.layers.{abs(layer_idx-4)}')
    if "out_conv" in name:
        name = name.replace('out_conv', 'projection')
    if "resConfUnit1" in name:
        name = name.replace('resConfUnit1', 'residual_layer1')
    if "resConfUnit2" in name:
        name = name.replace('resConfUnit2', 'residual_layer2')
    if "conv1" in name:
        name = name.replace('conv1', 'convolution1')
    if "conv2" in name:
        name = name.replace('conv2', 'convolution2')
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess1.0.project.0', 'neck.reassemble_stage.readout_projects.0.0')
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess2.0.project.0', 'neck.reassemble_stage.readout_projects.1.0')
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess3.0.project.0', 'neck.reassemble_stage.readout_projects.2.0')
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess4.0.project.0', 'neck.reassemble_stage.readout_projects.3.0')
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace('pretrained.act_postprocess1.3', 'neck.reassemble_stage.layers.0.projection')
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace('pretrained.act_postprocess1.4', 'neck.reassemble_stage.layers.0.resize')
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace('pretrained.act_postprocess2.3', 'neck.reassemble_stage.layers.1.projection')
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace('pretrained.act_postprocess2.4', 'neck.reassemble_stage.layers.1.resize')
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace('pretrained.act_postprocess3.3', 'neck.reassemble_stage.layers.2.projection')
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace('pretrained.act_postprocess4.3', 'neck.reassemble_stage.layers.3.projection')
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace('pretrained.act_postprocess4.4', 'neck.reassemble_stage.layers.3.resize')
    if "pretrained" in name:
        name = name.replace('pretrained', 'dpt')
    if "bn" in name:
        name = name.replace('bn', 'batch_norm')
    if "head" in name:
        name = name.replace('head', 'head.head')
    if "encoder.norm" in name:
        name = name.replace('encoder.norm', 'layernorm')
    if "auxlayer" in name:
        name = name.replace('auxlayer', 'auxiliary_head.head')
    return name
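
# --- Illustrative sketch (added for exposition, not part of the original script) ---
# The "tricky" refinenet renaming above reverses the stage order:
# refinenet4 -> fusion_stage.layers.0, ..., refinenet1 -> fusion_stage.layers.3.
assert [abs(i - 4) for i in (1, 2, 3, 4)] == [3, 2, 1, 0]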
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
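
# --- Illustrative sketch (added for exposition, not part of the original script) ---
# The fused timm projection stacks query, key and value along dim 0, so a
# (3*hidden, hidden) matrix splits into three (hidden, hidden) blocks:
_hidden = 4
_fused = torch.randn(3 * _hidden, _hidden)
_q, _k, _v = _fused[:_hidden], _fused[_hidden : 2 * _hidden], _fused[-_hidden:]
assert _q.shape == _k.shape == _v.shape == (_hidden, _hidden)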
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if 'ade' in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if 'ade' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors='pt')
    # forward pass
    outputs = model(**encoding).logits if 'ade' in checkpoint_url else model(**encoding).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print('Pushing model to hub...')
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization='nielsr',
            commit_message='Add model',
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization='nielsr',
            commit_message='Add image processor',
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
                 use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32,
                 num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
                 type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
                 num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )['hidden_states'][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )['hidden_states'][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': LlamaModel,
            'text-classification': LlamaForSequenceClassification,
            'text-generation': LlamaForCausalLM,
            'zero-shot': LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'single_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip('LLaMA buffers include complex numbers, which breaks this test')
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([('linear',), ('dynamic',)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
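
# --- Illustrative sketch (added for exposition, not part of the original tests) ---
# Simplified view of linear RoPE scaling (the real logic lives in the rotary
# embedding classes): positions are divided by the factor, so the rotary angles
# for position p at factor f equal those of position p/f in the unscaled model.
# With factor 1.0 nothing changes, which is why short inputs only match under
# *dynamic* scaling (it stays dormant until the input exceeds the original
# maximum length). `_rope_angles` is a hypothetical helper:
import numpy as np

def _rope_angles(position, dim=8, base=10_000.0, factor=1.0):
    inv_freq = 1.0 / base ** (np.arange(0, dim, 2) / dim)
    return (position / factor) * inv_freq

assert np.allclose(_rope_angles(20.0, factor=10.0), _rope_angles(2.0, factor=1.0))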
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!')
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf', device_map='auto')
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : Optional[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
lowerCAmelCase__ : List[str] = model(torch.tensor(a ) )
# Expected mean on dim = -1
lowerCAmelCase__ : Union[str, Any] = torch.tensor([[-2.0_6_2_2, -1.2_7_9_4, -1.1_6_3_8, -0.9_7_8_8, -1.4_6_0_3, -1.0_2_3_8, -1.7_8_9_3, -1.4_4_1_1]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : Any = torch.tensor([-8.1_4_0_6, -8.0_5_4_7, 2.7_4_6_1, -1.2_3_4_4, -0.1_4_4_8, -1.8_2_6_2, -1.0_0_2_0, -1.8_1_5_4, -1.6_8_9_5, -1.8_5_1_6, -2.3_5_7_4, -0.9_2_7_7, 3.7_5_9_8, 6.5_7_4_2, -1.2_9_9_8, -0.1_1_7_7, -8.1_4_0_6, -2.9_6_8_8, -2.9_1_9_9, -3.1_6_9_9, -3.5_2_5_4, -2.3_5_5_5, -2.7_9_8_8, -3.4_1_4_1, -2.8_2_6_2, -4.5_1_9_5, -3.3_3_7_9, -3.3_1_6_4, -2.7_8_3_2, -3.0_2_7_3] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : int = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : Optional[int] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
lowerCAmelCase__ : str = model(torch.tensor(a ) )
# Expected mean on dim = -1
lowerCAmelCase__ : str = torch.tensor([[-0.8_5_6_2, -1.8_5_2_0, -0.7_5_5_1, -0.4_1_6_2, -1.5_1_6_1, -1.2_0_3_8, -2.4_8_2_3, -2.3_2_5_4]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : List[str] = torch.tensor([-2.2_2_2_7, 4.8_8_2_8, 0.9_0_2_3, -0.4_5_7_8, -0.7_8_7_1, -0.1_0_3_3, -0.6_2_2_1, -0.5_7_8_6, -0.7_8_0_3, -1.0_6_7_4, -1.2_9_2_0, -0.1_5_7_0, 0.8_0_0_8, 2.0_7_2_3, -0.9_4_9_7, 0.2_7_7_1, -2.2_2_2_7, -0.7_6_1_2, -1.4_3_4_6, -1.2_0_6_1, -1.6_4_2_6, -0.3_0_0_0, -0.7_1_3_9, -1.1_9_3_4, -1.8_6_9_1, -1.6_9_7_3, -1.5_9_4_7, -1.2_7_0_5, -0.3_5_2_3, -0.5_5_1_3] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also, this is going to be a `too_slow` test' )
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
lowerCAmelCase__ : List[str] = model(torch.tensor(a ) )
lowerCAmelCase__ : int = torch.tensor(
[[-4.2_3_2_7, -3.3_3_6_0, -4.6_6_6_5, -4.7_6_3_1, -1.8_1_8_0, -3.4_1_7_0, -1.4_2_1_1, -3.1_8_1_0]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# fmt: off
lowerCAmelCase__ : Optional[int] = torch.tensor([-9.4_9_2_2, -3.9_5_5_1, 1.7_9_9_8, -5.6_7_5_8, -5.1_0_5_5, -5.8_9_8_4, -4.8_3_2_0, -6.8_0_8_6, -6.5_3_9_1, -5.6_1_7_2, -5.5_8_2_0, -5.5_3_5_2, 1.7_8_8_1, 3.6_2_8_9, -6.5_1_1_7, -3.4_7_8_5, -9.5_0_0_0, -6.0_3_5_2, -6.8_1_2_5, -6.0_1_9_5, -6.6_8_3_6, -5.4_7_2_7, -6.2_8_1_2, -6.0_3_9_1, -7.3_3_9_8, -7.4_2_9_7, -7.4_8_4_4, -6.5_8_2_0, -5.8_7_8_9, -5.5_3_1_2] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
@unittest.skip('Model is currently gated' )
@slow
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
lowerCAmelCase__ : Tuple = 'Simply put, the theory of relativity states that '
lowerCAmelCase__ : Dict = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
lowerCAmelCase__ : Dict = tokenizer.encode(a , return_tensors='pt' )
lowerCAmelCase__ : str = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=a )
# greedy generation outputs
lowerCAmelCase__ : Optional[Any] = model.generate(a , max_new_tokens=64 , top_p=a , temperature=1 , do_sample=a )
lowerCAmelCase__ : Tuple = tokenizer.decode(generated_ids[0] , skip_special_tokens=a )
self.assertEqual(a , a )
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = KandinskyImgaImgPipeline
lowercase = ['prompt', 'image_embeds', 'negative_image_embeds', 'image']
lowercase = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
]
lowercase = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowercase = False
@property
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
return 32
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
return self.time_input_dim
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
return 100
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : str = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , )
lowerCAmelCase__ : str = MultilingualCLIP(a )
lowerCAmelCase__ : Optional[Any] = text_encoder.eval()
return text_encoder
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Dict = {
'in_channels': 4,
# out_channels is double in_channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCAmelCase__ : List[str] = UNetaDConditionModel(**a )
return model
@property
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : str = VQModel(**self.dummy_movq_kwargs )
return model
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : str = self.dummy_text_encoder
lowerCAmelCase__ : List[str] = self.dummy_tokenizer
lowerCAmelCase__ : Any = self.dummy_unet
lowerCAmelCase__ : str = self.dummy_movq
lowerCAmelCase__ : List[str] = {
'num_train_timesteps': 1_000,
'beta_schedule': 'linear',
'beta_start': 0.0_0_0_8_5,
'beta_end': 0.0_1_2,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
lowerCAmelCase__ : Dict = DDIMScheduler(**a )
lowerCAmelCase__ : Any = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def _lowerCamelCase ( self : str , a : Tuple , a : Optional[Any]=0 ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(a ) ).to(a )
lowerCAmelCase__ : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(a )
# create init_image
lowerCAmelCase__ : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(a ) ).to(a )
lowerCAmelCase__ : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase__ : int = Image.fromarray(np.uinta(a ) ).convert('RGB' ).resize((256, 256) )
if str(a ).startswith('mps' ):
lowerCAmelCase__ : Optional[Any] = torch.manual_seed(a )
else:
lowerCAmelCase__ : int = torch.Generator(device=a ).manual_seed(a )
lowerCAmelCase__ : Union[str, Any] = {
'prompt': 'horse',
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
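# For img2img pipelines, 'strength' controls how much noise is added to the init image;
# roughly int(num_inference_steps * strength) denoising steps are actually run, so
# strength=0.2 with 10 steps above exercises only the last couple of steps.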
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = 'cpu'
lowerCAmelCase__ : Dict = self.get_dummy_components()
lowerCAmelCase__ : Tuple = self.pipeline_class(**a )
lowerCAmelCase__ : Dict = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Tuple = pipe(**self.get_dummy_inputs(a ) )
lowerCAmelCase__ : Tuple = output.images
lowerCAmelCase__ : Optional[int] = pipe(
**self.get_dummy_inputs(a ) , return_dict=a , )[0]
lowerCAmelCase__ : Dict = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : Tuple = np.array(
[0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : int ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_img2img_frog.npy' )
lowerCAmelCase__ : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowerCAmelCase__ : List[Any] = 'A red cartoon frog, 4k'
lowerCAmelCase__ : Optional[Any] = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa )
pipe_prior.to(a )
lowerCAmelCase__ : Union[str, Any] = KandinskyImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1' , torch_dtype=torch.floataa )
lowerCAmelCase__ : str = pipeline.to(a )
pipeline.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Tuple = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase__ : Tuple = pipe_prior(
a , generator=a , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
lowerCAmelCase__ : List[Any] = pipeline(
a , image=a , image_embeds=a , negative_image_embeds=a , generator=a , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='np' , )
lowerCAmelCase__ : List[Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(a , a )
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class A__ ( __magic_name__ ):
lowercase = 'unispeech'
def __init__( self : Any , a : List[Any]=32 , a : List[Any]=768 , a : Any=12 , a : List[str]=12 , a : List[Any]=3_072 , a : Any="gelu" , a : Dict=0.1 , a : List[str]=0.1 , a : List[str]=0.1 , a : Union[str, Any]=0.0 , a : str=0.0 , a : int=0.1 , a : List[str]=0.1 , a : List[Any]=0.0_2 , a : Optional[int]=1E-5 , a : Optional[int]="group" , a : Optional[Any]="gelu" , a : List[Any]=(512, 512, 512, 512, 512, 512, 512) , a : Optional[Any]=(5, 2, 2, 2, 2, 2, 2) , a : List[str]=(10, 3, 3, 3, 3, 2, 2) , a : Union[str, Any]=False , a : Union[str, Any]=128 , a : Tuple=16 , a : Dict=False , a : str=True , a : str=0.0_5 , a : Union[str, Any]=10 , a : Tuple=2 , a : int=0.0 , a : Optional[Any]=10 , a : List[str]=0 , a : str=320 , a : List[str]=2 , a : Optional[Any]=0.1 , a : Any=100 , a : Dict=256 , a : Any=256 , a : Dict=0.1 , a : List[Any]="mean" , a : Dict=False , a : str=False , a : Optional[int]=256 , a : Any=80 , a : List[Any]=0 , a : Optional[int]=1 , a : int=2 , a : List[Any]=0.5 , **a : int , ):
'''simple docstring'''
super().__init__(**a , pad_token_id=a , bos_token_id=a , eos_token_id=a )
lowerCAmelCase__ : List[str] = hidden_size
lowerCAmelCase__ : List[str] = feat_extract_norm
lowerCAmelCase__ : Optional[Any] = feat_extract_activation
lowerCAmelCase__ : str = list(a )
lowerCAmelCase__ : List[str] = list(a )
lowerCAmelCase__ : Tuple = list(a )
lowerCAmelCase__ : Dict = conv_bias
lowerCAmelCase__ : Optional[int] = num_conv_pos_embeddings
lowerCAmelCase__ : Any = num_conv_pos_embedding_groups
lowerCAmelCase__ : str = len(self.conv_dim )
lowerCAmelCase__ : Any = num_hidden_layers
lowerCAmelCase__ : Dict = intermediate_size
lowerCAmelCase__ : Dict = hidden_act
lowerCAmelCase__ : Union[str, Any] = num_attention_heads
lowerCAmelCase__ : Union[str, Any] = hidden_dropout
lowerCAmelCase__ : Tuple = attention_dropout
lowerCAmelCase__ : str = activation_dropout
lowerCAmelCase__ : Any = feat_proj_dropout
lowerCAmelCase__ : List[Any] = final_dropout
lowerCAmelCase__ : Tuple = layerdrop
lowerCAmelCase__ : Any = layer_norm_eps
lowerCAmelCase__ : Dict = initializer_range
lowerCAmelCase__ : Optional[Any] = num_ctc_classes
lowerCAmelCase__ : Tuple = vocab_size
lowerCAmelCase__ : Dict = do_stable_layer_norm
lowerCAmelCase__ : List[Any] = use_weighted_layer_sum
lowerCAmelCase__ : Any = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase__ : Union[str, Any] = apply_spec_augment
lowerCAmelCase__ : Any = mask_time_prob
lowerCAmelCase__ : Dict = mask_time_length
lowerCAmelCase__ : Tuple = mask_time_min_masks
lowerCAmelCase__ : Optional[int] = mask_feature_prob
lowerCAmelCase__ : Optional[Any] = mask_feature_length
lowerCAmelCase__ : int = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCAmelCase__ : int = num_codevectors_per_group
lowerCAmelCase__ : Any = num_codevector_groups
lowerCAmelCase__ : Any = contrastive_logits_temperature
lowerCAmelCase__ : int = feat_quantizer_dropout
lowerCAmelCase__ : List[Any] = num_negatives
lowerCAmelCase__ : List[str] = codevector_dim
lowerCAmelCase__ : Optional[int] = proj_codevector_dim
lowerCAmelCase__ : Dict = diversity_loss_weight
# ctc loss
lowerCAmelCase__ : Any = ctc_loss_reduction
lowerCAmelCase__ : Any = ctc_zero_infinity
# pretraining loss
lowerCAmelCase__ : Union[str, Any] = replace_prob
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
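# This product is the overall downsampling factor of the convolutional feature extractor;
# with the default strides (5, 2, 2, 2, 2, 2, 2) it equals 320, i.e. one output frame per
# 320 input samples (20 ms of 16 kHz audio).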
from __future__ import annotations
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> bool:
lowerCAmelCase__ : Dict = len(SCREAMING_SNAKE_CASE_ )
# We need to create a solution matrix to record the path.
lowerCAmelCase__ : Union[str, Any] = [[0 for _ in range(SCREAMING_SNAKE_CASE_ )] for _ in range(SCREAMING_SNAKE_CASE_ )]
lowerCAmelCase__ : List[str] = run_maze(SCREAMING_SNAKE_CASE_ , 0 , 0 , SCREAMING_SNAKE_CASE_ )
if solved:
print('\n'.join(str(SCREAMING_SNAKE_CASE_ ) for row in solutions ) )
else:
print('No solution exists!' )
return solved
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> bool:
lowerCAmelCase__ : Optional[Any] = len(SCREAMING_SNAKE_CASE_ )
# Final check point.
if i == j == (size - 1):
lowerCAmelCase__ : Union[str, Any] = 1
return True
lowerCAmelCase__ : int = (not i < 0) and (not j < 0) # Check lower bounds
lowerCAmelCase__ : str = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
lowerCAmelCase__ : int = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
lowerCAmelCase__ : Union[str, Any] = 1
# check for directions
if (
run_maze(SCREAMING_SNAKE_CASE_ , i + 1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
or run_maze(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , j + 1 , SCREAMING_SNAKE_CASE_ )
or run_maze(SCREAMING_SNAKE_CASE_ , i - 1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
or run_maze(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , j - 1 , SCREAMING_SNAKE_CASE_ )
):
return True
lowerCAmelCase__ : Dict = 0
return False
return False
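# This is plain depth-first backtracking: mark the current cell in the solution matrix,
# recurse into the four neighbors, and unmark the cell on failure so that other paths may
# still pass through it. Only right/down/left/up moves are considered, no diagonals.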
if __name__ == "__main__":
import doctest
doctest.testmod()
import torch
from torch import nn
class A__ ( nn.Module ):
def __init__( self : Optional[int] , a : Union[str, Any] , a : str , a : str , a : List[Any] , a : List[Any]=1 , a : Tuple=False ):
'''simple docstring'''
super().__init__()
lowerCAmelCase__ : Dict = n_token
lowerCAmelCase__ : Any = d_embed
lowerCAmelCase__ : str = d_proj
lowerCAmelCase__ : int = cutoffs + [n_token]
lowerCAmelCase__ : Union[str, Any] = [0] + self.cutoffs
lowerCAmelCase__ : str = div_val
lowerCAmelCase__ : Tuple = self.cutoffs[0]
lowerCAmelCase__ : Dict = len(self.cutoffs ) - 1
lowerCAmelCase__ : Any = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
lowerCAmelCase__ : int = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
lowerCAmelCase__ : Optional[Any] = nn.Parameter(torch.zeros(self.n_clusters ) )
lowerCAmelCase__ : Optional[int] = nn.ModuleList()
lowerCAmelCase__ : Tuple = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(a , a ) ) )
else:
self.out_projs.append(a )
self.out_layers.append(nn.Linear(a , a ) )
else:
for i in range(len(self.cutoffs ) ):
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : Optional[Any] = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(a , a ) ) )
self.out_layers.append(nn.Linear(a , r_idx - l_idx ) )
lowerCAmelCase__ : Tuple = keep_order
def _lowerCamelCase ( self : Optional[int] , a : List[str] , a : int , a : List[str] , a : str ):
'''simple docstring'''
if proj is None:
lowerCAmelCase__ : Tuple = nn.functional.linear(a , a , bias=a )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
lowerCAmelCase__ : int = nn.functional.linear(a , proj.t().contiguous() )
lowerCAmelCase__ : Tuple = nn.functional.linear(a , a , bias=a )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def _lowerCamelCase ( self : List[str] , a : List[Any] , a : Optional[int]=None , a : Tuple=False ):
'''simple docstring'''
if labels is not None:
# Shift so that tokens < n predict n
lowerCAmelCase__ : str = hidden[..., :-1, :].contiguous()
lowerCAmelCase__ : Optional[Any] = labels[..., 1:].contiguous()
lowerCAmelCase__ : List[Any] = hidden.view(-1 , hidden.size(-1 ) )
lowerCAmelCase__ : Tuple = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
else:
lowerCAmelCase__ : Optional[Any] = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
lowerCAmelCase__ : Optional[Any] = self._compute_logit(a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
lowerCAmelCase__ : str = labels != -100
lowerCAmelCase__ : int = torch.zeros_like(a , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase__ : List[str] = (
-nn.functional.log_softmax(a , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
lowerCAmelCase__ : Optional[Any] = nn.functional.log_softmax(a , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase__ , lowerCAmelCase__ : int = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : Any = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase__ : Any = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase__ : Optional[Any] = self.out_layers[i].weight
lowerCAmelCase__ : Optional[int] = self.out_layers[i].bias
if i == 0:
lowerCAmelCase__ : Dict = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase__ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(a )
biases.append(a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase__ : List[Any] = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : Union[str, Any] = nn.functional.log_softmax(a , dim=1 )
if labels is None:
lowerCAmelCase__ : Tuple = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
lowerCAmelCase__ : Dict = torch.zeros_like(a , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase__ : Tuple = 0
lowerCAmelCase__ : Union[str, Any] = [0] + self.cutoffs
for i in range(len(a ) - 1 ):
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
lowerCAmelCase__ : Tuple = (labels >= l_idx) & (labels < r_idx)
lowerCAmelCase__ : int = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
lowerCAmelCase__ : Tuple = labels.index_select(0 , a ) - l_idx
lowerCAmelCase__ : Any = head_logprob.index_select(0 , a )
lowerCAmelCase__ : Optional[int] = hidden.index_select(0 , a )
else:
lowerCAmelCase__ : Any = hidden
if i == 0:
if labels is not None:
lowerCAmelCase__ : Union[str, Any] = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase__ : List[str] = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase__ : Union[str, Any] = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : Optional[int] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : List[Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
lowerCAmelCase__ : List[str] = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase__ : Tuple = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
lowerCAmelCase__ : Union[str, Any] = logprob_i
if labels is not None:
if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order:
out.index_copy_(0 , a , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
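# This follows the adaptive-softmax factorization used by Transformer-XL: frequent
# ("head") tokens get a full softmax, while a rare token w in tail cluster c is scored as
#   log P(w) = log P(c | h) + log P(w | c, h),
# i.e. the head log-probability at the cluster slot plus the gathered tail log-probability.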
def _lowerCamelCase ( self : List[Any] , a : Any ):
'''simple docstring'''
if self.n_clusters == 0:
lowerCAmelCase__ : Union[str, Any] = self._compute_logit(a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(a , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase__ , lowerCAmelCase__ : str = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : str = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase__ : Dict = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase__ : int = self.out_layers[i].weight
lowerCAmelCase__ : int = self.out_layers[i].bias
if i == 0:
lowerCAmelCase__ : Optional[int] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase__ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(a )
biases.append(a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase__ : Dict = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : List[Any] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
lowerCAmelCase__ : Optional[Any] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : List[Any] = [0] + self.cutoffs
for i in range(len(a ) - 1 ):
lowerCAmelCase__ , lowerCAmelCase__ : str = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
lowerCAmelCase__ : Union[str, Any] = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase__ : Dict = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : List[str] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : Dict = head_logprob[:, -i] + tail_logprob_i
lowerCAmelCase__ : List[str] = logprob_i
return out
from __future__ import annotations
import os
from typing import Any
import requests
lowerCamelCase__ = """https://api.github.com"""
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
lowerCamelCase__ = BASE_URL + """/user"""
# https://github.com/settings/tokens
lowerCamelCase__ = os.environ.get("""USER_TOKEN""", """""")
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> dict[Any, Any]:
lowerCAmelCase__ : Any = {
'Authorization': F'''token {auth_token}''',
'Accept': 'application/vnd.github.v3+json',
}
return requests.get(SCREAMING_SNAKE_CASE_ , headers=SCREAMING_SNAKE_CASE_ ).json()
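# The 'token <PAT>' Authorization scheme is GitHub's classic personal-access-token
# format; the vnd.github.v3+json Accept header pins the REST API version.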
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F"""{key}: {value}""")
else:
raise ValueError("""'USER_TOKEN' field cannot be empty.""")
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
lowerCamelCase__ = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("""""", """|""", """|"""),
datarow=DataRow("""""", """|""", """|"""),
padding=1,
with_header_hide=None,
)
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}}
lowerCamelCase__ = [
{
"""type""": """header""",
"""text""": {
"""type""": """plain_text""",
"""text""": F"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
"""emoji""": True,
},
}
]
lowerCamelCase__ = 0
for log in Path().glob("""*.log"""):
lowerCamelCase__ = 0
with open(log, """r""") as f:
for line in f:
lowerCamelCase__ = json.loads(line)
if line.get("""nodeid""", """""") != "":
lowerCamelCase__ = line["""nodeid"""]
if line.get("""duration""", None) is not None:
lowerCamelCase__ = F"""{line["duration"]:.4f}"""
if line.get("""outcome""", """""") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("""_""")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
lowerCamelCase__ = []
log.unlink()
lowerCamelCase__ = """"""
lowerCamelCase__ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
lowerCamelCase__ = []
lowerCamelCase__ = {}
for test in failed_tests:
lowerCamelCase__ = test[0].split("""::""")
lowerCamelCase__ = data[0].split("""/""")[-1]
if data[0] not in filesafailed:
lowerCamelCase__ = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
lowerCamelCase__ = [test[0] for test in failed_table]
lowerCamelCase__ = list(set(files))
# Count number of instances in failed_tests
lowerCamelCase__ = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
lowerCamelCase__ = tabulate(
table,
headers=["""Test Location""", """Num Failed"""],
tablefmt=hf_table_format,
stralign="""right""",
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
lowerCamelCase__ = """Too many failed tests, please see the full report in the Action results."""
lowerCamelCase__ = len(err) + 10
lowerCamelCase__ = message[: 3000 - offset] + F"""\n...\n```\n{err}"""
print(F"""### {message}""")
else:
lowerCamelCase__ = """No failed tests! 🤗"""
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get("""TEST_TYPE""", """""") != "":
from slack_sdk import WebClient
lowerCamelCase__ = WebClient(token=os.environ["""SLACK_API_TOKEN"""])
if message != "No failed tests! 🤗":
lowerCamelCase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": message,
},
}
payload.append(md_report)
lowerCamelCase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": """*For more details:*""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {
"""type""": """plain_text""",
"""text""": """Check Action results""",
"""emoji""": True,
},
"""url""": F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
lowerCamelCase__ = {
"""type""": """context""",
"""elements""": [
{
"""type""": """plain_text""",
"""text""": F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
lowerCamelCase__ = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload)
lowerCamelCase__ = response.data["""ts"""]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
lowerCamelCase__ = """"""
for i, row in enumerate(test_failures):
if row[0] != test_class:
lowerCamelCase__ = row[0]
else:
lowerCamelCase__ = """"""
lowerCamelCase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel="""#accelerate-ci-daily""",
thread_ts=ts,
blocks=[payload],
)
from __future__ import annotations
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> list[int]:
lowerCAmelCase__ : Optional[int] = [True] * limit
lowerCAmelCase__ : Optional[Any] = False
lowerCAmelCase__ : Tuple = False
lowerCAmelCase__ : Dict = True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
lowerCAmelCase__ : int = i * 2
while index < limit:
lowerCAmelCase__ : List[str] = False
lowerCAmelCase__ : List[Any] = index + i
lowerCAmelCase__ : List[Any] = [2]
for i in range(3 , SCREAMING_SNAKE_CASE_ , 2 ):
if is_prime[i]:
primes.append(SCREAMING_SNAKE_CASE_ )
return primes
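# A sieve of Eratosthenes: for each odd i up to sqrt(limit), every multiple 2i, 3i, ...
# is marked composite, and 2 plus the surviving odd numbers are collected, giving the
# usual O(limit * log(log(limit))) running time.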
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = 1_000_000 ) -> int:
lowerCAmelCase__ : List[Any] = prime_sieve(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = 0
lowerCAmelCase__ : Optional[int] = 0
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
for j in range(i + length , len(SCREAMING_SNAKE_CASE_ ) ):
lowerCAmelCase__ : Union[str, Any] = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
lowerCAmelCase__ : int = j - i
lowerCAmelCase__ : Optional[Any] = sol
return largest
if __name__ == "__main__":
print(F"""{solution() = }""")
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowerCamelCase__ = imread(r"""digital_image_processing/image_data/lena_small.jpg""")
lowerCamelCase__ = cvtColor(img, COLOR_BGR2GRAY)
def lowerCAmelCase__ ( ) -> Dict:
lowerCAmelCase__ : List[Any] = cn.convert_to_negative(SCREAMING_SNAKE_CASE_ )
# assert negative_img array for at least one True
assert negative_img.any()
def lowerCAmelCase__ ( ) -> Optional[Any]:
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(SCREAMING_SNAKE_CASE_ , 110 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def lowerCAmelCase__ ( ) -> Tuple:
lowerCAmelCase__ : str = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def lowerCAmelCase__ ( ) -> Tuple:
lowerCAmelCase__ : Tuple = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
lowerCAmelCase__ : Optional[Any] = canny.canny(SCREAMING_SNAKE_CASE_ )
# assert canny array for at least one True
assert canny_array.any()
def lowerCAmelCase__ ( ) -> Optional[int]:
assert gg.gaussian_filter(SCREAMING_SNAKE_CASE_ , 5 , sigma=0.9 ).all()
def lowerCAmelCase__ ( ) -> Dict:
# laplace diagonals
lowerCAmelCase__ : Union[str, Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
lowerCAmelCase__ : int = conv.img_convolve(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).astype(SCREAMING_SNAKE_CASE_ )
assert res.any()
def lowerCAmelCase__ ( ) -> List[str]:
assert med.median_filter(SCREAMING_SNAKE_CASE_ , 3 ).any()
def lowerCAmelCase__ ( ) -> Any:
lowerCAmelCase__ , lowerCAmelCase__ : str = sob.sobel_filter(SCREAMING_SNAKE_CASE_ )
assert grad.any() and theta.any()
def lowerCAmelCase__ ( ) -> Any:
lowerCAmelCase__ : int = sp.make_sepia(SCREAMING_SNAKE_CASE_ , 20 )
assert sepia.all()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = "digital_image_processing/image_data/lena_small.jpg" ) -> Optional[Any]:
lowerCAmelCase__ : List[Any] = bs.Burkes(imread(SCREAMING_SNAKE_CASE_ , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = "digital_image_processing/image_data/lena_small.jpg" , ) -> Any:
lowerCAmelCase__ : Dict = rs.NearestNeighbour(imread(SCREAMING_SNAKE_CASE_ , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def lowerCAmelCase__ ( ) -> int:
lowerCAmelCase__ : int = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
lowerCAmelCase__ : List[str] = imread(SCREAMING_SNAKE_CASE_ , 0 )
# Test for get_neighbors_pixel function() return not None
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : List[str] = image[x_coordinate][y_coordinate]
lowerCAmelCase__ : Dict = lbp.get_neighbors_pixel(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
lowerCAmelCase__ : List[str] = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
lowerCAmelCase__ : Dict = lbp.local_binary_value(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert lbp_image.any()
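# Local binary pattern compares each of a pixel's 8 neighbors against the center value
# and packs the resulting bits into one byte, so every LBP code lies in [0, 255]; the
# assertion only checks that at least one pixel produced a nonzero code.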
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A__ :
def __init__( self : Optional[int] , a : Dict , a : Tuple=13 , a : Union[str, Any]=30 , a : int=2 , a : Dict=3 , a : Optional[int]=True , a : Dict=True , a : Union[str, Any]=32 , a : List[Any]=5 , a : str=4 , a : Optional[int]=37 , a : Tuple="gelu" , a : Optional[int]=0.1 , a : int=0.1 , a : List[str]=10 , a : str=0.0_2 , a : Union[str, Any]=None , a : Optional[int]=2 , ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = parent
lowerCAmelCase__ : Tuple = batch_size
lowerCAmelCase__ : Tuple = image_size
lowerCAmelCase__ : List[Any] = patch_size
lowerCAmelCase__ : List[str] = num_channels
lowerCAmelCase__ : Optional[Any] = is_training
lowerCAmelCase__ : int = use_labels
lowerCAmelCase__ : Optional[int] = hidden_size
lowerCAmelCase__ : Tuple = num_hidden_layers
lowerCAmelCase__ : Optional[int] = num_attention_heads
lowerCAmelCase__ : str = intermediate_size
lowerCAmelCase__ : Optional[int] = hidden_act
lowerCAmelCase__ : List[Any] = hidden_dropout_prob
lowerCAmelCase__ : Optional[Any] = attention_probs_dropout_prob
lowerCAmelCase__ : List[str] = type_sequence_label_size
lowerCAmelCase__ : Dict = initializer_range
lowerCAmelCase__ : Any = scope
lowerCAmelCase__ : str = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase__ : Any = (image_size // patch_size) ** 2
lowerCAmelCase__ : Optional[int] = num_patches + 1
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ : Dict = None
if self.use_labels:
lowerCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : List[Any] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _lowerCamelCase ( self : List[Any] , a : int , a : List[Any] , a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = ViTModel(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[str] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : Optional[int] , a : Any , a : Dict , a : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = ViTForMaskedImageModeling(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Any = model(a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCAmelCase__ : Dict = 1
lowerCAmelCase__ : List[str] = ViTForMaskedImageModeling(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ : Dict = model(a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _lowerCamelCase ( self : Union[str, Any] , a : List[str] , a : Tuple , a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.type_sequence_label_size
lowerCAmelCase__ : Union[str, Any] = ViTForImageClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[str] = model(a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase__ : int = 1
lowerCAmelCase__ : List[str] = ViTForImageClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ : int = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Dict = config_and_inputs
lowerCAmelCase__ : Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowercase = (
{'feature-extraction': ViTModel, 'image-classification': ViTForImageClassification}
if is_torch_available()
else {}
)
lowercase = True
lowercase = False
lowercase = False
lowercase = False
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = ViTModelTester(self )
lowerCAmelCase__ : str = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=37 )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : Optional[Any] = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase__ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : Tuple = model_class(a )
lowerCAmelCase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ : Optional[Any] = [*signature.parameters.keys()]
lowerCAmelCase__ : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
@slow
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : List[Any] = ViTModel.from_pretrained(a )
self.assertIsNotNone(a )
def lowerCAmelCase__ ( ) -> Tuple:
lowerCAmelCase__ : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ).to(a )
lowerCAmelCase__ : Optional[Any] = self.default_image_processor
lowerCAmelCase__ : List[str] = prepare_img()
lowerCAmelCase__ : Dict = image_processor(images=a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowerCAmelCase__ : Dict = model(**a )
# verify the logits
lowerCAmelCase__ : Tuple = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , a )
lowerCAmelCase__ : Union[str, Any] = torch.tensor([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) )
@slow
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = ViTModel.from_pretrained('facebook/dino-vits8' ).to(a )
lowerCAmelCase__ : List[Any] = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=480 )
lowerCAmelCase__ : Dict = prepare_img()
lowerCAmelCase__ : List[Any] = image_processor(images=a , return_tensors='pt' )
lowerCAmelCase__ : Optional[int] = inputs.pixel_values.to(a )
# forward pass
with torch.no_grad():
lowerCAmelCase__ : List[str] = model(a , interpolate_pos_encoding=a )
# verify the logits
lowerCAmelCase__ : str = torch.Size((1, 3_601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , a )
lowerCAmelCase__ : int = torch.tensor(
[[4.2_3_4_0, 4.3_9_0_6, -6.6_6_9_2], [4.5_4_6_3, 1.8_9_2_8, -6.7_2_5_7], [4.4_4_2_9, 0.8_4_9_6, -5.8_5_8_5]] ).to(a )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : int = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.floataa , device_map='auto' )
lowerCAmelCase__ : str = self.default_image_processor
lowerCAmelCase__ : Union[str, Any] = prepare_img()
lowerCAmelCase__ : Optional[Any] = image_processor(images=a , return_tensors='pt' )
lowerCAmelCase__ : str = inputs.pixel_values.to(a )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowerCAmelCase__ : Optional[Any] = model(a )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from collections.abc import Callable
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> float:
lowerCAmelCase__ : float = a
lowerCAmelCase__ : float = b
if function(SCREAMING_SNAKE_CASE_ ) == 0: # one of the a or b is a root for the function
return a
elif function(SCREAMING_SNAKE_CASE_ ) == 0:
return b
elif (
function(SCREAMING_SNAKE_CASE_ ) * function(SCREAMING_SNAKE_CASE_ ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError('could not find root in given interval.' )
else:
lowerCAmelCase__ : float = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7
if function(SCREAMING_SNAKE_CASE_ ) == 0:
return mid
elif function(SCREAMING_SNAKE_CASE_ ) * function(SCREAMING_SNAKE_CASE_ ) < 0:
lowerCAmelCase__ : List[Any] = mid
else:
lowerCAmelCase__ : List[Any] = mid
lowerCAmelCase__ : List[str] = start + (end - start) / 2.0
return mid
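# Bisection requires the function values at the endpoints to have opposite signs; each
# iteration halves the bracket, so reaching the 1e-7 tolerance above takes roughly
# log2((b - a) / 1e-7) iterations regardless of the function being solved.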
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> float:
return SCREAMING_SNAKE_CASE_**3 - 2 * SCREAMING_SNAKE_CASE_ - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0.999 , SCREAMING_SNAKE_CASE_="cosine" , ) -> Union[str, Any]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(SCREAMING_SNAKE_CASE_ ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(SCREAMING_SNAKE_CASE_ ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
lowerCAmelCase__ : Tuple = []
for i in range(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : List[Any] = i / num_diffusion_timesteps
lowerCAmelCase__ : str = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(SCREAMING_SNAKE_CASE_ ) / alpha_bar_fn(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) )
return torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )
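# This follows the "cosine" noise schedule of Nichol & Dhariwal (2021): given the
# cumulative product alpha_bar(t), each discrete beta is recovered as
#   beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i),
# clipped at max_beta, so the noise level rises smoothly rather than linearly.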
class A__ ( __magic_name__ , __magic_name__ ):
lowercase = [e.name for e in KarrasDiffusionSchedulers]
lowercase = 2
@register_to_config
def __init__( self : Union[str, Any] , a : int = 1_000 , a : float = 0.0_0_0_8_5 , a : float = 0.0_1_2 , a : str = "linear" , a : Optional[Union[np.ndarray, List[float]]] = None , a : str = "epsilon" , a : Optional[bool] = False , a : Optional[bool] = False , a : float = 1.0 , a : str = "linspace" , a : int = 0 , ):
'''simple docstring'''
if trained_betas is not None:
lowerCAmelCase__ : List[str] = torch.tensor(a , dtype=torch.floataa )
elif beta_schedule == "linear":
lowerCAmelCase__ : List[str] = torch.linspace(a , a , a , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowerCAmelCase__ : Union[str, Any] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , a , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowerCAmelCase__ : int = betas_for_alpha_bar(a , alpha_transform_type='cosine' )
elif beta_schedule == "exp":
lowerCAmelCase__ : List[str] = betas_for_alpha_bar(a , alpha_transform_type='exp' )
else:
raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
lowerCAmelCase__ : int = 1.0 - self.betas
lowerCAmelCase__ : Tuple = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(a , a , a )
lowerCAmelCase__ : Optional[Any] = use_karras_sigmas
def _lowerCamelCase ( self : str , a : List[Any] , a : str=None ):
'''simple docstring'''
if schedule_timesteps is None:
lowerCAmelCase__ : List[str] = self.timesteps
lowerCAmelCase__ : int = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
lowerCAmelCase__ : List[str] = 1 if len(a ) > 1 else 0
else:
lowerCAmelCase__ : Tuple = timestep.cpu().item() if torch.is_tensor(a ) else timestep
lowerCAmelCase__ : Tuple = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _lowerCamelCase ( self : Tuple , a : torch.FloatTensor , a : Union[float, torch.FloatTensor] , ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.index_for_timestep(a )
lowerCAmelCase__ : Any = self.sigmas[step_index]
lowerCAmelCase__ : Optional[Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace',"
                " 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
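    # Note: set_timesteps interleaves sigmas/timesteps via repeat_interleave(2)
    # because each Heun update queries the model twice: an Euler predictor step,
    # then a trapezoidal corrector at the next sigma.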
    def _sigma_to_t(self, sigma, log_sigmas):
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t
    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
        """Constructs the noise schedule of Karras et al. (2022)."""
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas
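    # Reference for _convert_to_karras: Karras et al. (2022), "Elucidating the
    # Design Space of Diffusion-Based Generative Models", Eq. (5). Interpolating
    # in sigma**(1/rho) space concentrates sampling steps near sigma_min.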
    @property
    def state_in_first_order(self):
        return self.dt is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ):
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2
            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample
            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples
    def __len__(self):
        return self.config.num_train_timesteps
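

# A minimal usage sketch for a scheduler with this interface. Assumption (not
# stated in this file): the class above mirrors diffusers' HeunDiscreteScheduler,
# which is used as a stand-in so the example is self-contained; the zero tensor
# below is a placeholder for a real UNet's noise prediction.
if __name__ == "__main__":
    from diffusers import HeunDiscreteScheduler

    scheduler = HeunDiscreteScheduler(num_train_timesteps=1000, beta_schedule="linear")
    scheduler.set_timesteps(num_inference_steps=10)

    sample = torch.randn(1, 4, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = torch.zeros_like(model_input)  # stand-in for model(model_input, t)
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    print(sample.shape)  # torch.Size([1, 4, 8, 8])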
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True
    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
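

# A self-contained inference sketch mirroring the slow integration test above; it
# only assumes the public "distilbert-base-uncased" checkpoint is downloadable.
if __name__ == "__main__":
    from transformers import DistilBertTokenizer

    tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
    model = DistilBertModel.from_pretrained("distilbert-base-uncased")
    encoded = tokenizer("Hello, world!", return_tensors="pt")
    with torch.no_grad():
        hidden = model(**encoded).last_hidden_state
    print(hidden.shape)  # (1, seq_len, 768), the shape the test asserts against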
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass
    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))
    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))
    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))
    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)
            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)
    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between outputs is {diff} (>= {tol}).")
    def test_vision_text_dual_encoder_model(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs)

    def test_model_from_pretrained_configs(self):
        inputs = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs)

    def test_save_load(self):
        inputs = self.prepare_config_and_inputs()
        self.check_save_load(**inputs)

    def test_vision_text_output_attention(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs)
    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()
        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
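

# Hedged sketch of what the dual encoder's logits mean: a scaled dot product of
# L2-normalized image and text embeddings (toy NumPy version; the real model first
# projects each tower's pooled output into the shared projection_dim).
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    image_embeds = rng.standard_normal((1, 512))
    text_embeds = rng.standard_normal((2, 512))
    image_embeds /= np.linalg.norm(image_embeds, axis=-1, keepdims=True)
    text_embeds /= np.linalg.norm(text_embeds, axis=-1, keepdims=True)
    logit_scale = 1.0  # matches logit_scale_init_value in the integration test above
    logits_per_image = logit_scale * image_embeds @ text_embeds.T
    print(logits_per_image.shape)  # (1, 2): one row per image, one column per text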
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out
    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores
    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
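

# Toy illustration of the selection rule in batch_decode above: each decoding head
# proposes a string plus a confidence (the cumulative product of per-token
# probabilities), and the highest-confidence proposal wins. Values are made up.
if __name__ == "__main__":
    char, bpe, wp = ("ticket", 0.91), ("ticke", 0.85), ("ticket", 0.88)
    strs, scores = zip(char, bpe, wp)
    best = scores.index(max(scores))
    print(strs[best], scores[best])  # -> ticket 0.91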
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
lowerCamelCase__ = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)
# Load and encode the datasets
    def tokenize_and_encode(obj):
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]
logger.info('Encoding dataset...' )
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )
    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])
    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)
    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
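
# Sketch of the per-choice input layout assembled in pre_process_datasets:
#   [_start_] story tokens [_delimiter_] continuation tokens [_classify_]
# The ids below are hypothetical; real ids come from tokenizer.convert_tokens_to_ids:
#   start, delim, clf = 40478, 40479, 40480
#   story, cont = [10, 11, 12], [20, 21]
#   example = [start] + story + [delim] + cont + [clf]
#   mc_token_id = len(example) - 1  # position of [_classify_], read by the MC head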
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()
def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
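# Programmatic example (no stdin needed; indices are 0-based), assuming the
# functions above are importable:
#   INF = float("inf")
#   g = [[0.0, 2.0, INF], [1.0, 0.0, INF], [INF, INF, 0.0]]
#   floyd_warshall(g, 3)  # prints the all-pairs shortest-path matrix shown above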
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCamelCase__ = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
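

# Example: dep_version_check("tqdm") raises if the installed tqdm version falls
# outside the range pinned in dependency_versions_table.py.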
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix, y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
lowerCamelCase__ = datasets.load_iris()
lowerCamelCase__ = iris.data[:, :2]
lowerCamelCase__ = (iris.target != 0) * 1
lowerCamelCase__ = 0.1
    lowerCamelCase__ = logistic_reg(alpha, x, y, max_iterations=70_000)
    print("""theta: """, theta)  # printing theta, i.e. our weights vector
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> List[str]:
return sigmoid_function(
np.dot(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="""b""", label="""0""")
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="""r""", label="""1""")
((lowerCamelCase__) , (lowerCamelCase__)) = (x[:, 0].min(), x[:, 0].max())
((lowerCamelCase__) , (lowerCamelCase__)) = (x[:, 1].min(), x[:, 1].max())
((lowerCamelCase__) , (lowerCamelCase__)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
lowerCamelCase__ = np.c_[xxa.ravel(), xxa.ravel()]
lowerCamelCase__ = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors="""black""")
plt.legend()
plt.show()
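# For clarity, a minimal, hedged restatement of one vectorized gradient-descent
# step used inside logistic_reg above (function and variable names here are
# illustrative, not from this script):
def _gradient_step_sketch(theta, x, y, alpha):
    h = 1 / (1 + np.exp(-np.dot(x, theta)))  # sigmoid of the linear scores
    gradient = np.dot(x.T, h - y) / y.size  # d(cross-entropy)/d(theta)
    return theta - alpha * gradient  # one weight update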
| 358
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class A__ ( __magic_name__ ):
lowercase = (DPMSolverSDEScheduler,)
lowercase = 10
def _lowerCamelCase ( self : Optional[int] , **a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = {
'num_train_timesteps': 1_100,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'noise_sampler_seed': 0,
}
config.update(**a )
return config
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=a , beta_end=a )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=a )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = self.scheduler_classes[0]
lowerCAmelCase__ : Tuple = self.get_scheduler_config()
lowerCAmelCase__ : List[Any] = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ : Dict = self.dummy_model()
lowerCAmelCase__ : int = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ : int = sample.to(a )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : List[Any] = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : str = model(a , a )
lowerCAmelCase__ : int = scheduler.step(a , a , a )
lowerCAmelCase__ : Any = output.prev_sample
lowerCAmelCase__ : List[Any] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Optional[int] = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_6_7.4_7_8_2_1_0_4_4_9_2_1_8_7_5 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_7_8_7_0_5_9_6_4_5_6_5_2_7_7 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_1.5_9_3_5_2_1_1_1_8_1_6_4_0_6 ) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_6_8_9_2_2_9_9_6_5_2 ) < 1E-3
else:
assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.scheduler_classes[0]
lowerCAmelCase__ : List[str] = self.get_scheduler_config(prediction_type='v_prediction' )
lowerCAmelCase__ : Any = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ : Optional[int] = self.dummy_model()
lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ : Any = sample.to(a )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : str = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : str = model(a , a )
lowerCAmelCase__ : Dict = scheduler.step(a , a , a )
lowerCAmelCase__ : Tuple = output.prev_sample
lowerCAmelCase__ : int = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Union[str, Any] = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_2_4.7_7_1_4_9_2_0_0_4_3_9_4_5_3 ) < 1E-2
assert abs(result_mean.item() - 0.1_6_2_2_6_2_8_9_0_1_4_8_1_6_2_8_4 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_2_8.1_6_6_3_3_6_0_5_9_5_7_0_3 ) < 1E-2
assert abs(result_mean.item() - 0.1_6_6_8_8_3_2_6_0_0_1_1_6_7_2_9_7 ) < 1E-3
else:
assert abs(result_sum.item() - 1_1_9.8_4_8_7_5_4_8_8_2_8_1_2_5 ) < 1E-2
assert abs(result_mean.item() - 0.1_5_6_0_5_3_0_6_6_2_5_3_6_6_2_1 ) < 1E-3
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.scheduler_classes[0]
lowerCAmelCase__ : Optional[int] = self.get_scheduler_config()
lowerCAmelCase__ : int = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps , device=a )
lowerCAmelCase__ : Tuple = self.dummy_model()
lowerCAmelCase__ : Any = self.dummy_sample_deter.to(a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCAmelCase__ : Dict = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : Optional[int] = model(a , a )
lowerCAmelCase__ : Tuple = scheduler.step(a , a , a )
lowerCAmelCase__ : Dict = output.prev_sample
lowerCAmelCase__ : Union[str, Any] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Dict = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_6_7.4_6_9_5_7_3_9_7_4_6_0_9_3_8 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_8_0_5_9_3_4_6_0_7_9_8_2_6_3_5 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_1.5_9_3_5_3_6_3_7_6_9_5_3_1_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_8_3_8_2_4_1_5_7_7_1 ) < 1E-3
else:
assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.scheduler_classes[0]
lowerCAmelCase__ : Any = self.get_scheduler_config()
lowerCAmelCase__ : Any = scheduler_class(**a , use_karras_sigmas=a )
scheduler.set_timesteps(self.num_inference_steps , device=a )
lowerCAmelCase__ : str = self.dummy_model()
lowerCAmelCase__ : Any = self.dummy_sample_deter.to(a ) * scheduler.init_noise_sigma
lowerCAmelCase__ : str = sample.to(a )
for t in scheduler.timesteps:
lowerCAmelCase__ : Any = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : int = model(a , a )
lowerCAmelCase__ : Union[str, Any] = scheduler.step(a , a , a )
lowerCAmelCase__ : Union[str, Any] = output.prev_sample
lowerCAmelCase__ : Optional[int] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Any = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_7_6.6_6_9_7_4_1_3_5_7_4_2_1_8_8 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_7.6_3_6_5_3_5_6_4_4_5_3_1_2_5 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
else:
assert abs(result_sum.item() - 1_7_0.3_1_3_5_2_2_3_3_8_8_6_7_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
| 307
| 0
|
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
lowerCamelCase__ = trt.Logger(trt.Logger.WARNING)
lowerCamelCase__ = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
lowerCamelCase__ = logging.getLogger(__name__)
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--onnx_model_path""",
default=None,
type=str,
required=True,
help="""Path to ONNX model: """,
)
parser.add_argument(
"""--output_dir""",
default=None,
type=str,
required=True,
help="""The output directory where the model checkpoints and predictions will be written.""",
)
# Other parameters
parser.add_argument(
"""--tokenizer_name""",
default="""""",
type=str,
required=True,
help="""Pretrained tokenizer name or path if not the same as model_name""",
)
parser.add_argument(
"""--version_2_with_negative""",
action="""store_true""",
help="""If true, the SQuAD examples contain some that do not have an answer.""",
)
parser.add_argument(
"""--null_score_diff_threshold""",
type=float,
default=0.0,
help="""If null_score - best_non_null is greater than the threshold predict null.""",
)
parser.add_argument(
"""--max_seq_length""",
default=384,
type=int,
help=(
"""The maximum total input sequence length after WordPiece tokenization. Sequences """
"""longer than this will be truncated, and sequences shorter than this will be padded."""
),
)
parser.add_argument(
"""--doc_stride""",
default=128,
type=int,
help="""When splitting up a long document into chunks, how much stride to take between chunks.""",
)
parser.add_argument("""--per_device_eval_batch_size""", default=8, type=int, help="""Batch size per GPU/CPU for evaluation.""")
parser.add_argument(
"""--n_best_size""",
default=20,
type=int,
help="""The total number of n-best predictions to generate in the nbest_predictions.json output file.""",
)
parser.add_argument(
"""--max_answer_length""",
default=30,
type=int,
help=(
"""The maximum length of an answer that can be generated. This is needed because the start """
"""and end predictions are not conditioned on one another."""
),
)
parser.add_argument("""--seed""", type=int, default=42, help="""random seed for initialization""")
parser.add_argument(
"""--dataset_name""",
type=str,
default=None,
required=True,
help="""The name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--dataset_config_name""",
type=str,
default=None,
help="""The configuration name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--preprocessing_num_workers""", type=int, default=4, help="""A csv or a json file containing the training data."""
)
parser.add_argument("""--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""")
parser.add_argument(
"""--fp16""",
action="""store_true""",
help="""Whether to use 16-bit (mixed) precision instead of 32-bit""",
)
parser.add_argument(
"""--int8""",
action="""store_true""",
help="""Whether to use INT8""",
)
lowerCamelCase__ = parser.parse_args()
if args.tokenizer_name:
lowerCamelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name."""
)
logger.info("""Training/evaluation parameters %s""", args)
lowerCamelCase__ = args.per_device_eval_batch_size
lowerCamelCase__ = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
lowerCamelCase__ = True
lowerCamelCase__ = """temp_engine/bert-fp32.engine"""
if args.fpaa:
lowerCamelCase__ = """temp_engine/bert-fp16.engine"""
if args.inta:
lowerCamelCase__ = """temp_engine/bert-int8.engine"""
# import ONNX file
if not os.path.exists("""temp_engine"""):
os.makedirs("""temp_engine""")
lowerCamelCase__ = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, """rb""") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
lowerCamelCase__ = [network.get_input(i) for i in range(network.num_inputs)]
lowerCamelCase__ = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
lowerCamelCase__ = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
lowerCamelCase__ = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
lowerCamelCase__ = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, """wb""") as f:
f.write(engine.serialize())
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
lowerCAmelCase__ : List[Any] = np.asarray(inputs['input_ids'] , dtype=np.intaa )
lowerCAmelCase__ : Tuple = np.asarray(inputs['attention_mask'] , dtype=np.intaa )
lowerCAmelCase__ : Any = np.asarray(inputs['token_type_ids'] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , SCREAMING_SNAKE_CASE_ )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , SCREAMING_SNAKE_CASE_ )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , SCREAMING_SNAKE_CASE_ )
# start time
lowerCAmelCase__ : Optional[Any] = time.time()
# Run inference
context.execute_async(
bindings=[int(SCREAMING_SNAKE_CASE_ ) for d_inp in d_inputs] + [int(SCREAMING_SNAKE_CASE_ ), int(SCREAMING_SNAKE_CASE_ )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
cuda.memcpy_dtoh_async(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Synchronize the stream and take time
stream.synchronize()
# end time
lowerCAmelCase__ : List[Any] = time.time()
lowerCAmelCase__ : str = end_time - start_time
lowerCAmelCase__ : Dict = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
lowerCamelCase__ = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCamelCase__ = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("""Evaluation requires a dataset name""")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
lowerCamelCase__ = raw_datasets["""validation"""].column_names
lowerCamelCase__ = """question""" if """question""" in column_names else column_names[0]
lowerCamelCase__ = """context""" if """context""" in column_names else column_names[1]
lowerCamelCase__ = """answers""" if """answers""" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
lowerCamelCase__ = tokenizer.padding_side == """right"""
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
lowerCamelCase__ = min(args.max_seq_length, tokenizer.model_max_length)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Any:
# Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
# left whitespace
lowerCAmelCase__ : Tuple = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
lowerCAmelCase__ : str = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='only_second' if pad_on_right else 'only_first' , max_length=SCREAMING_SNAKE_CASE_ , stride=args.doc_stride , return_overflowing_tokens=SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , padding='max_length' , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
lowerCAmelCase__ : str = tokenized_examples.pop('overflow_to_sample_mapping' )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
lowerCAmelCase__ : Union[str, Any] = []
for i in range(len(tokenized_examples['input_ids'] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
lowerCAmelCase__ : int = tokenized_examples.sequence_ids(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : List[Any] = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
lowerCAmelCase__ : Union[str, Any] = sample_mapping[i]
tokenized_examples["example_id"].append(examples['id'][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
lowerCAmelCase__ : List[str] = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples['offset_mapping'][i] )
]
return tokenized_examples
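# Hedged illustration of the overflow mechanics above (the kwargs mirror the
# tokenizer call inside prepare_validation_features; the variable names are
# assumptions):
#
#     enc = tokenizer(question, long_context, truncation="only_second",
#                     max_length=max_seq_length, stride=args.doc_stride,
#                     return_overflowing_tokens=True, return_offsets_mapping=True,
#                     padding="max_length")
#     # enc["overflow_to_sample_mapping"][j] -> index of the example that
#     # produced feature j, since one long context yields several features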
lowerCamelCase__ = raw_datasets["""validation"""]
# Validation Feature Creation
lowerCamelCase__ = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="""Running tokenizer on validation dataset""",
)
lowerCamelCase__ = default_data_collator
lowerCamelCase__ = eval_dataset.remove_columns(["""example_id""", """offset_mapping"""])
lowerCamelCase__ = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="eval" ) -> List[str]:
# Post-processing: we match the start logits and end logits to answers in the original context.
lowerCAmelCase__ : Tuple = postprocess_qa_predictions(
examples=SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ , predictions=SCREAMING_SNAKE_CASE_ , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=SCREAMING_SNAKE_CASE_ , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
lowerCAmelCase__ : Tuple = [
{'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
]
else:
lowerCAmelCase__ : Optional[Any] = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]
lowerCAmelCase__ : List[str] = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=SCREAMING_SNAKE_CASE_ , label_ids=SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ = load_metric("""squad_v2""" if args.version_2_with_negative else """squad""")
# Evaluation!
logger.info("""Loading ONNX model %s for evaluation""", args.onnx_model_path)
with open(engine_name, """rb""") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Any:
return trt.volume(engine.get_binding_shape(SCREAMING_SNAKE_CASE_ ) ) * engine.get_binding_dtype(SCREAMING_SNAKE_CASE_ ).itemsize
# Allocate device memory for inputs and outputs.
lowerCamelCase__ = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
lowerCamelCase__ = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
lowerCamelCase__ = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
lowerCamelCase__ = cuda.mem_alloc(h_outputa.nbytes)
lowerCamelCase__ = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
lowerCamelCase__ = cuda.Stream()
# Evaluation
logger.info("""***** Running Evaluation *****""")
logger.info(F""" Num examples = {len(eval_dataset)}""")
logger.info(F""" Batch size = {args.per_device_eval_batch_size}""")
lowerCamelCase__ = 0.0
lowerCamelCase__ = 0
lowerCamelCase__ = timeit.default_timer()
lowerCamelCase__ = None
for step, batch in enumerate(eval_dataloader):
lowerCamelCase__ , lowerCamelCase__ = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
lowerCamelCase__ , lowerCamelCase__ = outputs
lowerCamelCase__ = torch.tensor(start_logits)
lowerCamelCase__ = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
lowerCamelCase__ = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
lowerCamelCase__ = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
lowerCamelCase__ = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
lowerCamelCase__ = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
lowerCamelCase__ = nested_truncate(all_preds, len(eval_dataset))
lowerCamelCase__ = timeit.default_timer() - start_time
logger.info(""" Evaluation done in total %f secs (%f sec per example)""", evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info("""Average Inference Time = {:.3f} ms""".format(total_time * 1000 / niter))
logger.info("""Total Inference Time = {:.3f} ms""".format(total_time * 1000))
logger.info("""Total Number of Inference = %d""", niter)
lowerCamelCase__ = post_processing_function(eval_examples, eval_dataset, all_preds)
lowerCamelCase__ = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F"""Evaluation metrics: {eval_metric}""")
| 359
|
import os
import string
import sys
lowerCamelCase__ = 1 << 8
lowerCamelCase__ = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
lowerCamelCase__ = KEYMAP["""up"""]
lowerCamelCase__ = KEYMAP["""left"""]
if sys.platform == "win32":
lowerCamelCase__ = []
lowerCamelCase__ = {
b"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
b"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
lowerCamelCase__ = ord(str(i))
def lowerCAmelCase__ ( ) -> Dict:
if os.name == "nt":
import msvcrt
lowerCAmelCase__ : Dict = 'mbcs'
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(SCREAMING_SNAKE_CASE_ ) == 0:
# Read the keystroke
lowerCAmelCase__ : Optional[Any] = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
lowerCAmelCase__ : Dict = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
lowerCAmelCase__ : Dict = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) )
WIN_CH_BUFFER.append(SCREAMING_SNAKE_CASE_ )
if ord(SCREAMING_SNAKE_CASE_ ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
lowerCAmelCase__ : Dict = chr(KEYMAP['esc'] )
except KeyError:
lowerCAmelCase__ : Dict = cha[1]
else:
lowerCAmelCase__ : List[Any] = ch.decode(SCREAMING_SNAKE_CASE_ )
else:
lowerCAmelCase__ : Tuple = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
lowerCAmelCase__ : Tuple = sys.stdin.fileno()
lowerCAmelCase__ : Any = termios.tcgetattr(SCREAMING_SNAKE_CASE_ )
try:
tty.setraw(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = sys.stdin.read(1 )
finally:
termios.tcsetattr(SCREAMING_SNAKE_CASE_ , termios.TCSADRAIN , SCREAMING_SNAKE_CASE_ )
return ch
def lowerCAmelCase__ ( ) -> Union[str, Any]:
lowerCAmelCase__ : Any = get_raw_chars()
if ord(SCREAMING_SNAKE_CASE_ ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(SCREAMING_SNAKE_CASE_ ) == KEYMAP["esc"]:
lowerCAmelCase__ : Union[str, Any] = get_raw_chars()
if ord(SCREAMING_SNAKE_CASE_ ) == KEYMAP["mod_int"]:
lowerCAmelCase__ : str = get_raw_chars()
if ord(SCREAMING_SNAKE_CASE_ ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(SCREAMING_SNAKE_CASE_ ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(SCREAMING_SNAKE_CASE_ ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
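# Hedged usage sketch: the two helpers above are `get_raw_chars` and
# `get_character` in upstream Accelerate (names assumed here, since this dump
# renames the defs), so a simple read loop looks like:
#
#     while True:
#         ch = get_character()
#         if ch == chr(KEYMAP["interrupt"]):
#             break
#         print(repr(ch))  # arrow keys arrive with ARROW_KEY_FLAG set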
| 307
| 0
|
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
lowerCamelCase__ = parser.parse_args()
    lowerCamelCase__ = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)  # attribute name must match the --txt2img_unclip flag above
lowerCamelCase__ = CLIPImageProcessor()
lowerCamelCase__ = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""")
lowerCamelCase__ = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
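    # Illustrative invocation (the script name and paths are placeholders, not
    # from the source):
    #   python convert_unclip_txt2img_to_image_variation.py \
    #       --dump_path ./unclip-image-variation \
    #       --txt2img_unclip kakaobrain/karlo-v1-alpha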
| 360
|
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> str:
return "".join([hex(SCREAMING_SNAKE_CASE_ )[2:].zfill(2 ).upper() for byte in list(SCREAMING_SNAKE_CASE_ )] )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> bytes:
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(SCREAMING_SNAKE_CASE_ ) % 2) != 0:
raise ValueError(
'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(SCREAMING_SNAKE_CASE_ ) <= set('0123456789ABCDEF' ):
raise ValueError(
'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
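# Hedged round-trip example, assuming the upstream names of the two helpers
# above (base16_encode / base16_decode):
#
#     base16_encode(b"Hello")      # -> '48656C6C6F'
#     base16_decode("48656C6C6F")  # -> b'Hello'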
| 307
| 0
|
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> list:
lowerCAmelCase__ : int = word.split()
def justify(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
lowerCAmelCase__ : Any = max_width - width
lowerCAmelCase__ : Optional[int] = len(SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) == 1:
            # if there is only one word in the line,
            # just insert overall_spaces_count spaces after it to fill the line
return line[0] + " " * overall_spaces_count
else:
lowerCAmelCase__ : Tuple = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
lowerCAmelCase__ : int = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
lowerCAmelCase__ : Any = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(SCREAMING_SNAKE_CASE_ ):
num_spaces_between_words_list[i] += 1
lowerCAmelCase__ : List[Any] = []
for i in range(SCREAMING_SNAKE_CASE_ ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * ' ' )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[Any] = []
lowerCAmelCase__ : list[str] = []
lowerCAmelCase__ : str = 0
for word in words:
if width + len(SCREAMING_SNAKE_CASE_ ) + len(SCREAMING_SNAKE_CASE_ ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(SCREAMING_SNAKE_CASE_ )
width += len(SCREAMING_SNAKE_CASE_ )
else:
# justify the line and add it to result
answer.append(justify(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# reset new line and new width
lowerCAmelCase__ : Any = [word], len(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : str = max_width - width - len(SCREAMING_SNAKE_CASE_ )
answer.append(' '.join(SCREAMING_SNAKE_CASE_ ) + (remaining_spaces + 1) * ' ' )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
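# Hedged usage example for the justifier above (upstream name
# text_justification; each returned line is padded to exactly max_width):
#
#     text_justification("This is an example of text justification.", 16)
#     # -> ['This    is    an', 'example  of text', 'justification.  ']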
| 361
|
from __future__ import annotations
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> list[list[int]]:
lowerCAmelCase__ : list[list[int]] = []
create_all_state(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , [] , SCREAMING_SNAKE_CASE_ )
return result
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> None:
if level == 0:
total_list.append(current_list[:] )
return
for i in range(SCREAMING_SNAKE_CASE_ , total_number - level + 2 ):
current_list.append(SCREAMING_SNAKE_CASE_ )
create_all_state(i + 1 , SCREAMING_SNAKE_CASE_ , level - 1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
current_list.pop()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> None:
for i in total_list:
print(*SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
lowerCamelCase__ = 4
lowerCamelCase__ = 2
lowerCamelCase__ = generate_all_combinations(n, k)
print_all_state(total_list)
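    # Expected console output for n=4, k=2 (one combination per line):
    #   1 2
    #   1 3
    #   1 4
    #   2 3
    #   2 4
    #   3 4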
| 307
| 0
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCamelCase__ = 16
lowerCamelCase__ = 32
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 16 ) -> Dict:
lowerCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained('bert-base-cased' )
lowerCAmelCase__ : Tuple = load_dataset('glue' , 'mrpc' )
def tokenize_function(SCREAMING_SNAKE_CASE_ ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase__ : Tuple = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCAmelCase__ : str = datasets.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase__ : int = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(SCREAMING_SNAKE_CASE_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCAmelCase__ : Union[str, Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCAmelCase__ : Optional[Any] = 16
elif accelerator.mixed_precision != "no":
lowerCAmelCase__ : List[str] = 8
else:
lowerCAmelCase__ : Any = None
return tokenizer.pad(
SCREAMING_SNAKE_CASE_ , padding='longest' , max_length=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , return_tensors='pt' , )
# Instantiate dataloaders.
lowerCAmelCase__ : int = DataLoader(
tokenized_datasets['train'] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Dict = DataLoader(
tokenized_datasets['validation'] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCamelCase__ = mocked_dataloaders # noqa: F811
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS' , SCREAMING_SNAKE_CASE_ ) == "1":
lowerCAmelCase__ : List[Any] = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
lowerCAmelCase__ : Union[str, Any] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
lowerCAmelCase__ : str = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase__ : Optional[int] = config['lr']
lowerCAmelCase__ : List[Any] = int(config['num_epochs'] )
lowerCAmelCase__ : List[str] = int(config['seed'] )
lowerCAmelCase__ : List[Any] = int(config['batch_size'] )
set_seed(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[Any] = get_dataloaders(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
lowerCAmelCase__ : int = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowerCAmelCase__ : Tuple = batch_size // MAX_GPU_BATCH_SIZE
lowerCAmelCase__ : List[Any] = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase__ : List[Any] = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=SCREAMING_SNAKE_CASE_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase__ : int = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase__ : Optional[int] = AdamW(params=model.parameters() , lr=SCREAMING_SNAKE_CASE_ )
# Instantiate scheduler
lowerCAmelCase__ : Dict = get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE_ , num_warmup_steps=100 , num_training_steps=(len(SCREAMING_SNAKE_CASE_ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase__ : List[Any] = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
lowerCAmelCase__ : int = os.path.split(SCREAMING_SNAKE_CASE_ )[-1].split('.' )[0]
accelerator.init_trackers(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Now we train the model
for epoch in range(SCREAMING_SNAKE_CASE_ ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
lowerCAmelCase__ : int = 0
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCAmelCase__ : Dict = model(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : str = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
lowerCAmelCase__ : int = loss / gradient_accumulation_steps
accelerator.backward(SCREAMING_SNAKE_CASE_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase__ : List[Any] = model(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : int = outputs.logits.argmax(dim=-1 )
lowerCAmelCase__ : Optional[int] = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE_ , references=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase__ : Optional[int] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , SCREAMING_SNAKE_CASE_ )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'accuracy': eval_metric['accuracy'],
'f1': eval_metric['f1'],
'train_loss': total_loss.item() / len(SCREAMING_SNAKE_CASE_ ),
'epoch': epoch,
} , step=SCREAMING_SNAKE_CASE_ , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def lowerCAmelCase__ ( ) -> List[str]:
lowerCAmelCase__ : List[Any] = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
parser.add_argument(
'--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , )
parser.add_argument(
        '--project_dir' , type=SCREAMING_SNAKE_CASE_ , default='logs' , help='Location to store experiment tracking logs and relevant project information' , )
lowerCAmelCase__ : Dict = parser.parse_args()
lowerCAmelCase__ : Optional[Any] = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main()
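# Illustrative launch commands (standard Accelerate CLI; the script name is a
# placeholder):
#
#   accelerate launch tracking_example.py --with_tracking --project_dir logs
#   accelerate launch tracking_example.py --mixed_precision fp16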
| 362
|
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class A__ ( unittest.TestCase ):
@parameterized.expand([(None,), ('foo.json',)] )
def _lowerCamelCase ( self : Dict , a : str ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a , config_name=a )
lowerCAmelCase__ : Tuple = GenerationConfig.from_pretrained(a , config_name=a )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , a )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Dict = AutoConfig.from_pretrained('gpt2' )
lowerCAmelCase__ : Any = GenerationConfig.from_model_config(a )
lowerCAmelCase__ : Any = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(a , a )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = GenerationConfig()
lowerCAmelCase__ : Dict = {
'max_new_tokens': 1_024,
'foo': 'bar',
}
lowerCAmelCase__ : List[Any] = copy.deepcopy(a )
lowerCAmelCase__ : Dict = generation_config.update(**a )
# update_kwargs was not modified (no side effects)
self.assertEqual(a , a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(a , {'foo': 'bar'} )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = GenerationConfig()
lowerCAmelCase__ : List[Any] = 'bar'
with tempfile.TemporaryDirectory('test-generation-config' ) as tmp_dir:
generation_config.save_pretrained(a )
lowerCAmelCase__ : List[Any] = GenerationConfig.from_pretrained(a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , 'bar' )
lowerCAmelCase__ : int = GenerationConfig.from_model_config(a )
assert not hasattr(a , 'foo' ) # no new kwargs should be initialized if from config
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , a )
self.assertEqual(default_config.num_beams , 1 )
lowerCAmelCase__ : List[Any] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , a )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a )
lowerCAmelCase__ : Any = GenerationConfig.from_pretrained(a , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , a )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class A__ ( unittest.TestCase ):
@classmethod
def _lowerCamelCase ( cls : int ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = TOKEN
HfFolder.save_token(a )
@classmethod
def _lowerCamelCase ( cls : Optional[int] ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='test-generation-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-generation-config-org' )
except HTTPError:
pass
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('test-generation-config' , use_auth_token=self._token )
lowerCAmelCase__ : Any = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-generation-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
a , repo_id='test-generation-config' , push_to_hub=a , use_auth_token=self._token )
lowerCAmelCase__ : Tuple = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('valid_org/test-generation-config-org' , use_auth_token=self._token )
lowerCAmelCase__ : Dict = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-generation-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
a , repo_id='valid_org/test-generation-config-org' , push_to_hub=a , use_auth_token=self._token )
lowerCAmelCase__ : List[str] = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
| 307
| 0
|
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {"""vocab_file""": """spiece.model"""}
lowerCamelCase__ = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
}
}
# TODO(PVP) - this should be removed in Transformers v5
lowerCamelCase__ = {
"""t5-small""": 512,
"""t5-base""": 512,
"""t5-large""": 512,
"""t5-3b""": 512,
"""t5-11b""": 512,
}
lowerCamelCase__ = """▁"""
class A__ ( __magic_name__ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ['input_ids', 'attention_mask']
def __init__( self : Any , a : List[Any] , a : Dict="</s>" , a : Tuple="<unk>" , a : Optional[int]="<pad>" , a : Any=100 , a : List[Any]=None , a : Optional[Dict[str, Any]] = None , a : Optional[int]=True , **a : Union[str, Any] , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
lowerCAmelCase__ : str = [f'''<extra_id_{i}>''' for i in range(a )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
lowerCAmelCase__ : Optional[int] = len(set(filter(lambda a : bool('extra_id' in str(a ) ) , a ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
' tokens' )
if legacy:
logger.warning_once(
f'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
' read the related pull request available at https://github.com/huggingface/transformers/pull/24565' )
lowerCAmelCase__ : Optional[Any] = legacy
lowerCAmelCase__ : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=a , unk_token=a , pad_token=a , extra_ids=a , additional_special_tokens=a , sp_model_kwargs=self.sp_model_kwargs , legacy=a , **a , )
lowerCAmelCase__ : Tuple = vocab_file
lowerCAmelCase__ : Dict = extra_ids
lowerCAmelCase__ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a )
@staticmethod
def _lowerCamelCase ( a : Optional[Any] , a : str , a : List[Any] ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
lowerCAmelCase__ : Optional[int] = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
' instantiate this tokenizer with `model_max_length` set to your preferred value.' , a , )
return max_model_length
@property
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
return self.sp_model.get_piece_size() + self._extra_ids
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = {self.convert_ids_to_tokens(a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _lowerCamelCase ( self : Dict , a : List[int] , a : Optional[List[int]] = None , a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(a )) + [1]
return ([0] * len(a )) + [1] + ([0] * len(a )) + [1]
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
return list(
set(filter(lambda a : bool(re.search(R'<extra_id_\d+>' , a ) ) is not None , self.additional_special_tokens ) ) )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return [self._convert_token_to_id(a ) for token in self.get_sentinel_tokens()]
def _lowerCamelCase ( self : Optional[Any] , a : List[int] ):
'''simple docstring'''
if len(a ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def _lowerCamelCase ( self : str , a : List[int] , a : Optional[List[int]] = None ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def _lowerCamelCase ( self : Any , a : List[int] , a : Optional[List[int]] = None ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self._add_eos_if_not_present(a )
if token_ids_a is None:
return token_ids_a
else:
lowerCAmelCase__ : int = self._add_eos_if_not_present(a )
return token_ids_a + token_ids_a
def __getstate__( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.__dict__.copy()
lowerCAmelCase__ : Union[str, Any] = None
return state
def __setstate__( self : str , a : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : int = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCAmelCase__ : Union[str, Any] = {}
lowerCAmelCase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowerCamelCase ( self : List[str] , a : "TextInput" , **a : int ):
'''simple docstring'''
if not self.legacy:
lowerCAmelCase__ : Any = SPIECE_UNDERLINE + text.replace(a , ' ' )
return super().tokenize(a , **a )
def _tokenize( self , text , **kwargs ):
'''simple docstring'''
if not self.legacy:
is_first = text.startswith(SPIECE_UNDERLINE )
if is_first:
text = text[1:]
tokens = self.sp_model.encode(text , out_type=str )
if not self.legacy and not is_first and not text.startswith(' ' ) and tokens[0].startswith(SPIECE_UNDERLINE ):
tokens = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def _convert_token_to_id( self , token : str ):
'''simple docstring'''
if token.startswith('<extra_id_' ):
match = re.match(R'<extra_id_(\d+)>' , token )
num = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(token )
def _convert_id_to_token( self , index : int ):
'''simple docstring'''
if index < self.sp_model.get_piece_size():
token = self.sp_model.IdToPiece(index )
else:
token = f'''<extra_id_{self.vocab_size - 1 - index}>'''
return token
def convert_tokens_to_string( self , tokens ):
'''simple docstring'''
current_sub_tokens = []
out_string = ''
prev_is_special = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(current_sub_tokens ) + token
prev_is_special = True
current_sub_tokens = []
else:
current_sub_tokens.append(token )
prev_is_special = False
out_string += self.sp_model.decode(current_sub_tokens )
return out_string.strip()
def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(save_directory ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
out_vocab_file = os.path.join(
save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , out_vocab_file )
elif not os.path.isfile(self.vocab_file ):
with open(out_vocab_file , 'wb' ) as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model )
return (out_vocab_file,)
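# Hedged usage sketch for the sentinel-token arithmetic above (checkpoint name is
# illustrative): <extra_id_N> ids count down from the end of the vocabulary, so
# <extra_id_0> maps to vocab_size - 1 and <extra_id_1> to vocab_size - 2, mirroring
# _convert_token_to_id / _convert_id_to_token.
#
#   tokenizer = T5Tokenizer.from_pretrained('t5-small')
#   ids = tokenizer('The <extra_id_0> walks in <extra_id_1> park').input_ids
#   assert ids[-1] == tokenizer.eos_token_id  # appended by build_inputs_with_special_tokens
#   assert tokenizer.convert_tokens_to_ids('<extra_id_0>') == tokenizer.vocab_size - 1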
| 363
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( __magic_name__ , unittest.TestCase ):
pipeline_class = UnCLIPImageVariationPipeline
params = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
batch_params = IMAGE_VARIATION_BATCH_PARAMS
required_optional_params = [
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
test_xformers_attention = False
@property
def text_embedder_hidden_size( self ):
'''simple docstring'''
return 32
@property
def time_input_dim( self ):
'''simple docstring'''
return 32
@property
def block_out_channels_a( self ):
'''simple docstring'''
return self.time_input_dim
@property
def time_embed_dim( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def cross_attention_dim( self ):
'''simple docstring'''
return 100
@property
def dummy_tokenizer( self ):
'''simple docstring'''
tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def dummy_text_encoder( self ):
'''simple docstring'''
torch.manual_seed(0 )
config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(config )
@property
def dummy_image_encoder( self ):
'''simple docstring'''
torch.manual_seed(0 )
config = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(config )
@property
def dummy_text_proj( self ):
'''simple docstring'''
torch.manual_seed(0 )
model_kwargs = {
'clip_embeddings_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'cross_attention_dim': self.cross_attention_dim,
}
model = UnCLIPTextProjModel(**model_kwargs )
return model
@property
def dummy_decoder( self ):
'''simple docstring'''
torch.manual_seed(0 )
model_kwargs = {
'sample_size': 32,
# RGB in channels
'in_channels': 3,
# Out channels is double in channels because predicts mean and variance
'out_channels': 6,
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': 'identity',
}
model = UNetaDConditionModel(**model_kwargs )
return model
@property
def dummy_super_res_kwargs( self ):
'''simple docstring'''
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def dummy_super_res_first( self ):
'''simple docstring'''
torch.manual_seed(0 )
model = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def dummy_super_res_last( self ):
'''simple docstring'''
torch.manual_seed(1 )
model = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def get_dummy_components( self ):
'''simple docstring'''
decoder = self.dummy_decoder
text_proj = self.dummy_text_proj
text_encoder = self.dummy_text_encoder
tokenizer = self.dummy_tokenizer
super_res_first = self.dummy_super_res_first
super_res_last = self.dummy_super_res_last
decoder_scheduler = UnCLIPScheduler(
variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
super_res_scheduler = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
feature_extractor = CLIPImageProcessor(crop_size=32 , size=32 )
image_encoder = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def get_dummy_inputs( self , device , seed=0 , pil_image=True ):
'''simple docstring'''
input_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
if str(device ).startswith('mps' ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
if pil_image:
input_image = input_image * 0.5 + 0.5
input_image = input_image.clamp(0 , 1 )
input_image = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
input_image = DiffusionPipeline.numpy_to_pil(input_image )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = 'cpu'
lowerCAmelCase__ : Any = self.get_dummy_components()
lowerCAmelCase__ : List[str] = self.pipeline_class(**a )
lowerCAmelCase__ : Dict = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Dict = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : str = pipe(**a )
lowerCAmelCase__ : Optional[Any] = output.images
lowerCAmelCase__ : str = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Optional[int] = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : List[str] = np.array(
[
0.9_9_9_7,
0.0_0_0_2,
0.9_9_9_7,
0.9_9_9_7,
0.9_9_6_9,
0.0_0_2_3,
0.9_9_9_7,
0.9_9_6_9,
0.9_9_7_0,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = 'cpu'
lowerCAmelCase__ : Dict = self.get_dummy_components()
lowerCAmelCase__ : Optional[int] = self.pipeline_class(**a )
lowerCAmelCase__ : int = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : List[str] = pipe(**a )
lowerCAmelCase__ : Union[str, Any] = output.images
lowerCAmelCase__ : int = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : int = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : str = np.array([0.9_9_9_7, 0.0_0_0_3, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_7_0, 0.0_0_2_4, 0.9_9_9_7, 0.9_9_7_1, 0.9_9_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = 'cpu'
lowerCAmelCase__ : int = self.get_dummy_components()
lowerCAmelCase__ : Tuple = self.pipeline_class(**a )
lowerCAmelCase__ : Union[str, Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Tuple = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : List[str] = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
lowerCAmelCase__ : Optional[int] = pipe(**a )
lowerCAmelCase__ : Tuple = output.images
lowerCAmelCase__ : List[str] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Union[str, Any] = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
lowerCAmelCase__ : str = pipe(
**a , return_dict=a , )[0]
lowerCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
lowerCAmelCase__ : Union[str, Any] = np.array(
[
0.9_9_9_7,
0.9_9_8_9,
0.0_0_0_8,
0.0_0_2_1,
0.9_9_6_0,
0.0_0_1_8,
0.0_0_1_4,
0.0_0_0_2,
0.9_9_3_3,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
device = torch.device('cpu' )
class DummyScheduler:
init_noise_sigma = 1
lowerCAmelCase__ : Optional[Any] = self.get_dummy_components()
lowerCAmelCase__ : Dict = self.pipeline_class(**a )
lowerCAmelCase__ : Optional[Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Optional[int] = torch.Generator(device=a ).manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe.decoder.dtype
lowerCAmelCase__ : Union[str, Any] = 1
lowerCAmelCase__ : str = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
lowerCAmelCase__ : List[Any] = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
lowerCAmelCase__ : List[str] = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
lowerCAmelCase__ : Any = pipe.prepare_latents(
a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a , pil_image=a )
lowerCAmelCase__ : Optional[int] = pipe(
**a , decoder_latents=a , super_res_latents=a ).images
lowerCAmelCase__ : Optional[Any] = self.get_dummy_inputs(a , pil_image=a )
# Don't pass image, instead pass embedding
lowerCAmelCase__ : Union[str, Any] = pipeline_inputs.pop('image' )
lowerCAmelCase__ : Union[str, Any] = pipe.image_encoder(a ).image_embeds
lowerCAmelCase__ : List[Any] = pipe(
**a , decoder_latents=a , super_res_latents=a , image_embeddings=a , ).images
# make sure passing image embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1E-4
@skip_mps
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch_device == 'cpu'
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
lowerCAmelCase__ : int = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=a , expected_max_diff=a )
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = torch_device == 'cpu'
lowerCAmelCase__ : Any = True
lowerCAmelCase__ : Optional[Any] = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=a , relax_max_difference=a , additional_params_copy_to_batched_inputs=a , )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
lowerCAmelCase__ : List[str] = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=a , additional_params_copy_to_batched_inputs=a , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=a )
@skip_mps
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_unclip_image_variation_karlo( self ):
'''simple docstring'''
input_image = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' )
expected_image = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/unclip/karlo_v1_alpha_cat_variation_fp16.npy' )
pipeline = UnCLIPImageVariationPipeline.from_pretrained(
'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.floataa )
pipeline = pipeline.to(torch_device )
pipeline.set_progress_bar_config(disable=None )
generator = torch.Generator(device='cpu' ).manual_seed(0 )
output = pipeline(
input_image , generator=generator , output_type='np' , )
image = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(image , expected_image , 15 )
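# Sketch of the tolerance check used above (assumed semantics, not the actual
# testing_utils source): the mean absolute per-pixel difference between the
# generated image and the reference image must stay below the threshold (15 here).
#
#   def assert_mean_pixel_difference_sketch(image, expected, threshold=15):
#       diff = np.abs(image.astype(np.float64) - expected.astype(np.float64))
#       assert diff.mean() < threshold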
| 307
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ = {"""configuration_opt""": ["""OPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """OPTConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_opt"] = [
"""OPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OPTForCausalLM""",
"""OPTModel""",
"""OPTPreTrainedModel""",
"""OPTForSequenceClassification""",
"""OPTForQuestionAnswering""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""TFOPTForCausalLM""", """TFOPTModel""", """TFOPTPreTrainedModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_opt"] = [
"""FlaxOPTForCausalLM""",
"""FlaxOPTModel""",
"""FlaxOPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
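# Minimal sketch of the lazy-import pattern wired up above (an illustration, not the
# actual _LazyModule implementation): attribute access triggers the submodule import
# on first use, which keeps the top-level package import cheap.
import importlib


class _LazyModuleSketch:
    def __init__(self, name, import_structure):
        self._name = name
        # map every exported attribute back to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._attr_to_module[attr], self._name)
        return getattr(module, attr)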
| 364
|
def stooge_sort(arr: list) -> list:
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)
        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)
        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
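# Quick sanity check and complexity note (added for illustration): the three
# recursive calls on overlapping 2/3 slices give T(n) = 3 T(2n/3) + O(1), i.e.
# O(n^(log 3 / log 1.5)) ≈ O(n^2.71), so stooge sort is strictly a teaching algorithm.
#
#   >>> stooge_sort([2, 4, 5, 3, 1])
#   [1, 2, 3, 4, 5]
#   >>> stooge_sort([])
#   []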
| 307
| 0
|
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType( enum.Enum ):
TENSORS = 0
NEW_TEXT = 1
FULL_TEXT = 2
@add_end_docstrings(__magic_name__ )
class A__ ( __magic_name__ ):
XL_PREFIX = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
def __init__( self , *args , **kwargs ):
super().__init__(*args , **kwargs )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
prefix = None
if self.model.config.prefix is not None:
prefix = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
prefix = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix , **self._forward_params )
self._preprocess_params = {**self._preprocess_params, **preprocess_params}
self._forward_params = {**self._forward_params, **forward_params}
def _sanitize_parameters( self , return_full_text=None , return_tensors=None , return_text=None , return_type=None , clean_up_tokenization_spaces=None , prefix=None , handle_long_generation=None , stop_sequence=None , **generate_kwargs , ):
preprocess_params = {}
if prefix is not None:
preprocess_params['prefix'] = prefix
if prefix:
prefix_inputs = self.tokenizer(
prefix , padding=False , add_special_tokens=False , return_tensors=self.framework )
generate_kwargs['prefix_length'] = prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
' [None, \'hole\']' )
preprocess_params['handle_long_generation'] = handle_long_generation
preprocess_params.update(generate_kwargs )
forward_params = generate_kwargs
postprocess_params = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
return_type = ReturnType.TENSORS
if return_type is not None:
postprocess_params['return_type'] = return_type
if clean_up_tokenization_spaces is not None:
postprocess_params['clean_up_tokenization_spaces'] = clean_up_tokenization_spaces
if stop_sequence is not None:
stop_sequence_ids = self.tokenizer.encode(stop_sequence , add_special_tokens=False )
if len(stop_sequence_ids ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
generate_kwargs['eos_token_id'] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def _parse_and_tokenize( self , *args , **kwargs ):
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True} )
return super()._parse_and_tokenize(*args , **kwargs )
def __call__( self , text_inputs , **kwargs ):
return super().__call__(text_inputs , **kwargs )
def preprocess( self , prompt_text , prefix="" , handle_long_generation=None , **generate_kwargs ):
inputs = self.tokenizer(
prefix + prompt_text , padding=False , add_special_tokens=False , return_tensors=self.framework )
inputs['prompt_text'] = prompt_text
if handle_long_generation == "hole":
cur_len = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
new_tokens = generate_kwargs['max_new_tokens']
else:
new_tokens = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
keep_length = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
' models max length' )
inputs['input_ids'] = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
inputs['attention_mask'] = inputs['attention_mask'][:, -keep_length:]
return inputs
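# Worked example of the "hole" strategy above (numbers are illustrative): with
# tokenizer.model_max_length = 1024 and max_new_tokens = 100, a 1000-token prompt is
# cut to its last 1024 - 100 = 924 tokens so that prompt plus generation still fits
# the context window; everything to the left of the kept window is dropped.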
def _forward( self , model_inputs , **generate_kwargs ):
input_ids = model_inputs['input_ids']
attention_mask = model_inputs.get('attention_mask' , None )
# Allow empty prompts
if input_ids.shape[1] == 0:
input_ids = None
attention_mask = None
in_b = 1
else:
in_b = input_ids.shape[0]
prompt_text = model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
prefix_length = generate_kwargs.pop('prefix_length' , 0 )
if prefix_length > 0:
has_max_new_tokens = 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
generate_kwargs['max_length'] = generate_kwargs.get('max_length' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
has_min_new_tokens = 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
generated_sequence = self.model.generate(input_ids=input_ids , attention_mask=attention_mask , **generate_kwargs )
out_b = generated_sequence.shape[0]
if self.framework == "pt":
generated_sequence = generated_sequence.reshape(in_b , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
generated_sequence = tf.reshape(generated_sequence , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def postprocess( self , model_outputs , return_type=ReturnType.FULL_TEXT , clean_up_tokenization_spaces=True ):
generated_sequence = model_outputs['generated_sequence'][0]
input_ids = model_outputs['input_ids']
prompt_text = model_outputs['prompt_text']
generated_sequence = generated_sequence.numpy().tolist()
records = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
record = {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
text = self.tokenizer.decode(
sequence , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
prompt_length = 0
else:
prompt_length = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , ) )
if return_type == ReturnType.FULL_TEXT:
all_text = prompt_text + text[prompt_length:]
else:
all_text = text[prompt_length:]
record = {'generated_text': all_text}
records.append(record )
return records
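# Hedged usage sketch (the checkpoint name is illustrative; any causal LM works):
#
#   from transformers import pipeline
#   generator = pipeline('text-generation', model='gpt2')
#   out = generator('Once upon a time', max_new_tokens=20, return_full_text=False)
#   print(out[0]['generated_text'])  # only the newly generated continuation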
| 365
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
"""processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""Speech2TextTokenizer"""]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""Speech2TextFeatureExtractor"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_speech_to_text"] = [
"""TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSpeech2TextForConditionalGeneration""",
"""TFSpeech2TextModel""",
"""TFSpeech2TextPreTrainedModel""",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_speech_to_text"] = [
"""SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Speech2TextForConditionalGeneration""",
"""Speech2TextModel""",
"""Speech2TextPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeech2TextForConditionalGeneration,
TFSpeech2TextModel,
TFSpeech2TextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
Speech2TextForConditionalGeneration,
Speech2TextModel,
Speech2TextPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
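# Why both branches exist (a short explanatory note, not library code): static
# analyzers evaluate only the TYPE_CHECKING block, so IDEs resolve real symbols,
# while at runtime every import is deferred until first attribute access.
#
#   import typing
#   assert typing.TYPE_CHECKING is False  # always False when the code actually runs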
| 307
| 0
|
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
lowerCamelCase__ = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
task_name: str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys() )} )
data_dir: str = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
max_seq_length: int = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
overwrite_cache: bool = field(
default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def __post_init__( self ):
'''simple docstring'''
self.task_name = self.task_name.lower()
class Split( Enum ):
train = 'train'
dev = 'dev'
test = 'test'
class GlueDataset( Dataset ):
args: GlueDataTrainingArguments
output_mode: str
features: List[InputFeatures]
def __init__( self , args : GlueDataTrainingArguments , tokenizer : PreTrainedTokenizerBase , limit_length : Optional[int] = None , mode : Union[str, Split] = Split.train , cache_dir : Optional[str] = None , ):
'''simple docstring'''
warnings.warn(
'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , FutureWarning , )
self.args = args
self.processor = glue_processors[args.task_name]()
self.output_mode = glue_output_modes[args.task_name]
if isinstance(mode , str ):
try:
mode = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
# Load data features from cache or dataset file
cached_features_file = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}''' , )
label_list = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
self.label_list = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lock_path = cached_features_file + '.lock'
with FileLock(lock_path ):
if os.path.exists(cached_features_file ) and not args.overwrite_cache:
start = time.time()
self.features = torch.load(cached_features_file )
logger.info(
f'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
else:
logger.info(f'''Creating features from dataset file at {args.data_dir}''' )
if mode == Split.dev:
examples = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
examples = self.processor.get_test_examples(args.data_dir )
else:
examples = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
examples = examples[:limit_length]
self.features = glue_convert_examples_to_features(
examples , tokenizer , max_length=args.max_seq_length , label_list=label_list , output_mode=self.output_mode , )
start = time.time()
torch.save(self.features , a )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self : List[str] ):
'''simple docstring'''
return len(self.features )
def __getitem__( self : Union[str, Any] , a : int ):
'''simple docstring'''
return self.features[i]
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return self.label_list
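# Hedged usage sketch (paths and checkpoint are illustrative; assumes the MRPC data
# has been downloaded into ./glue/MRPC):
#
#   from transformers import AutoTokenizer
#   args = GlueDataTrainingArguments(task_name='mrpc', data_dir='./glue/MRPC')
#   tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
#   dataset = GlueDataset(args, tokenizer=tokenizer, mode=Split.train)
#   print(len(dataset), dataset[0])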
| 366
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = {
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
}
}
lowerCAmelCase__ : int = {
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 128,
'task_specific_params.summarization.min_length': 12,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 142,
'task_specific_params.summarization_cnn.min_length': 56,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 62,
'task_specific_params.summarization_xsum.min_length': 11,
'task_specific_params.summarization_xsum.num_beams': 6,
}
self.assertEqual(flatten_dict(a ) , a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
x = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(x ) , x.transpose() ) )
x = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
x = np.random.randn(3 , 4 )
t = torch.tensor(x )
self.assertTrue(np.allclose(transpose(x ) , transpose(t ).numpy() ) )
x = np.random.randn(3 , 4 , 5 )
t = torch.tensor(x )
self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0) ) , transpose(t , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(transpose(a ) , transpose(a ).numpy() ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : Dict = tf.constant(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , transpose(a , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = np.random.randn(3 , 4 )
lowerCAmelCase__ : int = jnp.array(a )
self.assertTrue(np.allclose(transpose(a ) , np.asarray(transpose(a ) ) ) )
lowerCAmelCase__ : Any = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : str = jnp.array(a )
self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , np.asarray(transpose(a , axes=(1, 2, 0) ) ) ) )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , np.reshape(a , (4, 3) ) ) )
lowerCAmelCase__ : Tuple = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , np.reshape(a , (12, 5) ) ) )
@require_torch
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = np.random.randn(3 , 4 )
lowerCAmelCase__ : Dict = torch.tensor(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , reshape(a , (4, 3) ).numpy() ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : str = torch.tensor(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , reshape(a , (12, 5) ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
lowerCAmelCase__ : List[Any] = tf.constant(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , reshape(a , (4, 3) ).numpy() ) )
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , reshape(a , (12, 5) ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
lowerCAmelCase__ : List[str] = jnp.array(a )
self.assertTrue(np.allclose(reshape(a , (4, 3) ) , np.asarray(reshape(a , (4, 3) ) ) ) )
lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
lowerCAmelCase__ : Union[str, Any] = jnp.array(a )
self.assertTrue(np.allclose(reshape(a , (12, 5) ) , np.asarray(reshape(a , (12, 5) ) ) ) )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(a ) , np.squeeze(a ) ) )
lowerCAmelCase__ : int = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , np.squeeze(a , axis=2 ) ) )
@require_torch
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase__ : str = torch.tensor(a )
self.assertTrue(np.allclose(squeeze(a ) , squeeze(a ).numpy() ) )
lowerCAmelCase__ : Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase__ : Dict = torch.tensor(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , squeeze(a , axis=2 ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(squeeze(a ) , squeeze(a ).numpy() ) )
lowerCAmelCase__ : int = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase__ : str = tf.constant(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , squeeze(a , axis=2 ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase__ : Union[str, Any] = jnp.array(a )
self.assertTrue(np.allclose(squeeze(a ) , np.asarray(squeeze(a ) ) ) )
lowerCAmelCase__ : str = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase__ : Optional[Any] = jnp.array(a )
self.assertTrue(np.allclose(squeeze(a , axis=2 ) , np.asarray(squeeze(a , axis=2 ) ) ) )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , np.expand_dims(a , axis=1 ) ) )
@require_torch
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : str = np.random.randn(3 , 4 )
lowerCAmelCase__ : str = torch.tensor(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , expand_dims(a , axis=1 ).numpy() ) )
@require_tf
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = np.random.randn(3 , 4 )
lowerCAmelCase__ : Any = tf.constant(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , expand_dims(a , axis=1 ).numpy() ) )
@require_flax
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = np.random.randn(3 , 4 )
lowerCAmelCase__ : Tuple = jnp.array(a )
self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , np.asarray(expand_dims(a , axis=1 ) ) ) )
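# Minimal sketch of the framework dispatch these tests exercise (an assumption
# inferred from the assertions above, not the actual transformers.utils source):
import numpy as _np


def transpose_sketch(array, axes=None):
    """Transpose numpy arrays directly; other frameworks get their own branch."""
    if isinstance(array, _np.ndarray):
        return _np.transpose(array, axes=axes)
    # torch.Tensor / tf.Tensor / jnp.ndarray would be detected with further
    # isinstance checks and routed to permute / tf.transpose / jnp.transpose.
    raise TypeError(f"Unsupported array type: {type(array)}")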
| 307
| 0
|
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class A__ ( __magic_name__ , unittest.TestCase ):
tokenizer_class = WavaVecaPhonemeCTCTokenizer
test_rust_tokenizer = False
def _lowerCamelCase ( self : str ):
'''simple docstring'''
super().setUp()
vocab = (
'<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '
'ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '
'ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '
'oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '
'pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '
'yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '
'əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '
'ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '
'ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '
'uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '
'ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '
'ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '
'ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'
).split(' ' )
vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
self.special_tokens_map = {'pad_token': '<pad>', 'unk_token': '<unk>', 'bos_token': '<s>', 'eos_token': '</s>'}
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(vocab_tokens ) + '\n' )
def get_clean_sequence( self , tokenizer , with_prefix_space=False , max_length=20 , min_length=5 ):
'''simple docstring'''
toks = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=False )) for i in range(len(tokenizer ) )]
toks = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] , do_phonemize=False ) , toks ) )
if max_length is not None and len(toks ) > max_length:
toks = toks[:max_length]
if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
while len(toks ) < min_length:
toks = toks + toks
# toks_str = [t[1] for t in toks]
toks_ids = [t[0] for t in toks]
# Ensure consistency
output_txt = tokenizer.decode(toks_ids , clean_up_tokenization_spaces=False )
if " " not in output_txt and len(toks_ids ) > 1:
output_txt = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=False )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=False )
)
if with_prefix_space:
output_txt = ' ' + output_txt
output_ids = tokenizer.encode(output_txt , add_special_tokens=False )
return output_txt, output_ids
def get_tokenizer( self , **kwargs ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
# check adding a single token
tokenizer.add_tokens('xxx' )
token_ids = tokenizer('m xxx ɪ' , do_phonemize=False ).input_ids
self.assertEqual(token_ids , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(['aaa', 'bbb', 'ccc'] )
token_ids = tokenizer('m aaa ɪ ccc' , do_phonemize=False ).input_ids
self.assertEqual(token_ids , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
token_ids = tokenizer('maɪ c' , do_phonemize=False ).input_ids
self.assertEqual(token_ids , [3, 200] ) # mai should be <unk> (=3)
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
lowerCAmelCase__ : int = 'Hello how are you'
lowerCAmelCase__ : Any = tokenizer.phonemize(a , phonemizer_lang='en-us' )
self.assertEqual(a , 'h ə l oʊ h aʊ ɑːɹ j uː' )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
lowerCAmelCase__ : Optional[Any] = 'Hello how are you'
lowerCAmelCase__ : str = tokenizer.phonemize(a , phonemizer_lang='en-us' )
self.assertEqual(tokenizer(a ).input_ids , tokenizer(a , do_phonemize=a ).input_ids )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
lowerCAmelCase__ : str = 'Hello how are you'
lowerCAmelCase__ : Optional[Any] = tokenizer.phonemize(a , phonemizer_lang='en-us' )
lowerCAmelCase__ : int = tokenizer.decode(tokenizer(a ).input_ids )
self.assertEqual(a , a )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
lowerCAmelCase__ : Tuple = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
lowerCAmelCase__ : Tuple = tokenizer.decode(sample_ids[0] )
lowerCAmelCase__ : List[str] = tokenizer.batch_decode(a )
self.assertEqual(a , batch_tokens[0] )
self.assertEqual(a , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
lowerCAmelCase__ : Optional[int] = 'Hello how are you'
lowerCAmelCase__ : List[Any] = tokenizer.phonemize(a , phonemizer_lang='en-us' )
self.assertEqual(a , 'h ə l oʊ | h aʊ | ɑːɹ | j uː |' )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
lowerCAmelCase__ : List[str] = 'Hello how are you'
lowerCAmelCase__ : List[str] = tokenizer.phonemize(a , phonemizer_lang='en-us' )
self.assertEqual(tokenizer(a ).input_ids , tokenizer(a , do_phonemize=a ).input_ids )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
# fmt: off
lowerCAmelCase__ : int = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
lowerCAmelCase__ : List[str] = tokenizer.decode(sample_ids[0] )
lowerCAmelCase__ : Optional[Any] = tokenizer.batch_decode(a )
self.assertEqual(a , batch_tokens[0] )
self.assertEqual(a , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] )
# decode with no word_del_token filter
lowerCAmelCase__ : str = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=a )
lowerCAmelCase__ : Any = tokenizer.batch_decode(a , filter_word_delimiter_token=a )
self.assertEqual(a , batch_tokens[0] )
self.assertEqual(a , ['k s ɾ | ɾ l | ɭʲ', '| j ð | s j ð s oːɹ'] )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : str = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
lowerCAmelCase__ : Optional[Any] = 'Hello how are you'
lowerCAmelCase__ : List[Any] = tokenizer.phonemize(a , phonemizer_lang='en-us' )
lowerCAmelCase__ : int = tokenizer.decode(tokenizer(a ).input_ids , filter_word_delimiter_token=a )
self.assertEqual(a , a )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
lowerCAmelCase__ : Tuple = 'Hello how are you'
lowerCAmelCase__ : Any = tokenizer.phonemize(a , phonemizer_lang='en-us' )
lowerCAmelCase__ : Optional[Any] = tokenizer.decode(tokenizer(a ).input_ids , filter_word_delimiter_token=a )
self.assertEqual(' '.join([p.strip() for p in phonemes.split(' |' )] ).strip() , a )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token=a )
lowerCAmelCase__ : int = 'Hello how are you'
lowerCAmelCase__ : Union[str, Any] = tokenizer(a , phonemizer_lang='en-us' ).input_ids
lowerCAmelCase__ : List[str] = tokenizer(a , phonemizer_lang='fr-fr' ).input_ids
self.assertNotEqual(a , a )
lowerCAmelCase__ : Any = tokenizer.decode(a )
lowerCAmelCase__ : int = tokenizer.decode(a )
self.assertEqual(a , 'h ə l oʊ h aʊ ɑːɹ j uː' )
self.assertEqual(a , 'ɛ l o h aʊ a ʁ j u' )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
lowerCAmelCase__ : Optional[int] = 'Hello how Are you'
lowerCAmelCase__ : str = 'hello how are you'
lowerCAmelCase__ : Optional[int] = tokenizer(a ).input_ids
lowerCAmelCase__ : int = tokenizer(a ).input_ids
self.assertEqual(a , a )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
tokenizer.add_tokens(['!', '?'] )
tokenizer.add_special_tokens({'cls_token': '$$$'} )
# fmt: off
lowerCAmelCase__ : Optional[int] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
lowerCAmelCase__ : Union[str, Any] = tokenizer.batch_decode(a )
self.assertEqual(a , ['k s ɾ ɾ l ɭʲ!?!? $$$', 'j ð s j ð s oːɹ $$$'] )
@staticmethod
def get_from_offsets( offsets , key ):
'''simple docstring'''
retrieved_list = [d[key] for d in offsets]
return retrieved_list
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = self.get_tokenizer(word_delimiter_token='|' )
tokenizer.add_tokens('|' )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
outputs = tokenizer.decode(sample_ids , output_char_offsets=True , filter_word_delimiter_token=False )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue('text' in outputs )
self.assertTrue('char_offsets' in outputs )
self.assertTrue(isinstance(outputs , WavaVecaPhonemeCTCTokenizerOutput ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(' '.join(self.get_from_offsets(outputs['char_offsets'] , 'char' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'char' ) , ['k', 's', 'ɾ', 'ɾ', '|', 'ɾ', 'l', '|', 'ɭʲ'] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'start_offset' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'end_offset' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
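# Worked micro-example of the offset rule checked above (a hedged reading of the
# expected values, not the tokenizer source): consecutive identical ids collapse to
# a single char spanning from its first frame to one past its last repeated frame,
# and pad frames are skipped entirely:
#   frames: [11, 5, 5, 5, 15, 15, <pad>, 15, 15, ...]
#   chars:  'k' -> (0, 1), 's' -> (1, 4), 'ɾ' -> (4, 6), 'ɾ' -> (7, 9), ...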
def _lowerCamelCase ( self : int ):
'''simple docstring'''
        tokenizer = self.get_tokenizer(word_delimiter_token='|')

        def check_list_tuples_equal(outputs_batch, outputs_list):
            self.assertTrue(isinstance(outputs_batch, WavaVecaPhonemeCTCTokenizerOutput))
            self.assertTrue(isinstance(outputs_list[0], WavaVecaPhonemeCTCTokenizerOutput))
            # transform list to ModelOutput
            outputs_batch_2 = WavaVecaPhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]})
            self.assertListEqual(outputs_batch['text'], outputs_batch_2['text'])

            def recursive_check(list_or_dict_1, list_or_dict_2):
                if isinstance(list_or_dict_1, list):
                    [recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)]
                self.assertEqual(list_or_dict_1, list_or_dict_2)

            if 'char_offsets' in outputs_batch:
                recursive_check(outputs_batch['char_offsets'], outputs_batch_2['char_offsets'])

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
            [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
        ]
        # fmt: on
        # We assume that `decode` works as expected. All we will check now is
        # the output type is correct and the output is identical to `decode`
        # char
        outputs_char_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True)
        outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids]
        check_list_tuples_equal(outputs_char_batch, outputs_char)
@unittest.skip('Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes' )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip('Wav2Vec2PhonemeTokenizer always puts spaces between phonemes' )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
pass
    @unittest.skip('encodes text to ids, but decodes ids to phonemes -> not possible to have internal consistency' )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
pass
@unittest.skip('Wav2Vec2PhonemeModel has no max model length => no testing' )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)
                self.assertNotEqual(vocab_size, 0)
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                new_toks = ['aaaaa bbbbbb', 'cccccccccdddddddd']
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)
                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))
                tokens = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l', add_special_tokens=False)
                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                new_toks_2 = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)
                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))
                tokens = tokenizer.encode(
                    '>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l', add_special_tokens=False)
                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
@unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                tokens = ['ð', 'ɪ', 's', 'ɪ', 'z', 'ɐ', 't', 'ɛ', 'k', 's', 't']
                output = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(output['text'], str)
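# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test file): the decode-with-
# offsets API exercised by the tests above, run against a real checkpoint.
# The checkpoint name is an assumption; any Wav2Vec2 phoneme CTC tokenizer on
# the Hub works.
def _demo_char_offsets() -> None:
    from transformers import Wav2Vec2PhonemeCTCTokenizer

    tokenizer = Wav2Vec2PhonemeCTCTokenizer.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft')
    sample_ids = [11, 5, 5, 15, tokenizer.pad_token_id, 15, 8, 98]
    output = tokenizer.decode(sample_ids, output_char_offsets=True)
    # `output.text` is the decoded phoneme string; `output.char_offsets` is a
    # list of {'char', 'start_offset', 'end_offset'} dicts, as the tests check.
    print(output.text)
    print(output.char_offsets)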
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
lowerCamelCase__ = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
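# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original __init__; kept as
# comments so importing the package has no side effects). It shows the most
# common entry points re-exported above. The dataset id "rotten_tomatoes" is
# an assumption; any dataset on the Hub works.
#
#     from datasets import load_dataset, concatenate_datasets
#
#     train = load_dataset('rotten_tomatoes', split='train')
#     test = load_dataset('rotten_tomatoes', split='test')
#     combined = concatenate_datasets([train, test])
#     print(combined)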
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'blocks.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'blocks.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
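# Illustrative sketch (not part of the original script): how the fused timm
# qkv projection of shape (3 * hidden_size, hidden_size) is sliced into the
# query/key/value blocks consumed above. The tiny size is an assumption for
# the demo; defined but never called by the conversion itself.
def _demo_qkv_split() -> None:
    hidden_size = 4
    qkv = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
    q = qkv[:hidden_size, :]
    k = qkv[hidden_size : hidden_size * 2, :]
    v = qkv[-hidden_size:, :]
    assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)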
def remove_classification_head_(state_dict):
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the timm model's weights into our ViT structure.
    """
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1_000
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-1k-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith('tiny'):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith('small'):
            config.hidden_size = 384
            config.intermediate_size = 1_536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith('small'):
            config.hidden_size = 768
            config.intermediate_size = 2_304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith('base'):
            pass
        elif vit_name[4:].startswith('large'):
            config.hidden_size = 1_024
            config.intermediate_size = 4_096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith('huge'):
            config.hidden_size = 1_280
            config.intermediate_size = 5_120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values)
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model {vit_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_patch16_224""",
type=str,
help="""Name of the ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
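    # Example invocation (illustrative; the script filename is an assumption,
    # and the model name matches the --vit_name default above):
    #
    #     python convert_vit_timm_to_pytorch.py \
    #         --vit_name vit_base_patch16_224 \
    #         --pytorch_dump_folder_path ./vit-base-patch16-224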
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = DanceDiffusionPipeline
lowercase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
lowercase = PipelineTesterMixin.required_optional_params - {
'callback',
'latents',
'callback_steps',
'output_type',
'num_images_per_prompt',
}
lowercase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
lowercase = False
lowercase = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16_000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type='fourier',
            mid_block_type='UNetMidBlock1D',
            down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D'),
            up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip'),
        )
        scheduler = IPNDMScheduler()
        components = {
            'unet': unet,
            'scheduler': scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'batch_size': 1,
            'generator': generator,
            'num_inference_steps': 4,
        }
        return inputs
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, components['unet'].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1E-2
@skip_mps
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k')
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1E-2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k', torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1E-2
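# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original tests): minimal unconditional
# audio generation with the pipeline exercised above, mirroring the slow-test
# settings. A CUDA device is assumed for reasonable runtime; defined but not
# invoked by the test suite.
def _demo_dance_diffusion() -> None:
    pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k')
    pipe = pipe.to(torch_device)
    generator = torch.manual_seed(0)
    output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
    audio = output.audios  # shape: (batch, channels, samples)
    print(audio.shape)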
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)
GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = 'gptj'
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__(
        self,
        vocab_size=50_400,
        n_positions=2_048,
        n_embd=4_096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function='gelu_new',
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = 'default',
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, 'pad_token_id', None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs
    @property
    def num_layers(self) -> int:
        return self._config.n_layer
    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch

                batch, seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
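# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): exporting a GPT-J
# checkpoint through the ONNX config above. The tiny checkpoint id is an
# assumption, and the `transformers.onnx.export` signature may differ across
# versions; treat this as a sketch, not the canonical export path.
def _demo_onnx_export() -> None:
    from pathlib import Path

    from transformers import AutoModel, AutoTokenizer
    from transformers.onnx import export

    model_id = 'hf-internal-testing/tiny-random-gptj'  # assumed small test checkpoint
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModel.from_pretrained(model_id)
    onnx_config = GPTJOnnxConfig(model.config)
    export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path('gptj.onnx'))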
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : Any , *a : Any , **a : Any ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Union[str, Any] , *a : Optional[int] , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : int , *a : List[Any] , **a : int ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : str , *a : Any , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : List[str] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , *a : Optional[Any] , **a : Any ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[int] , *a : List[Any] , **a : str ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : List[Any] , *a : List[str] , **a : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , *a : Union[str, Any] , **a : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *a : Dict , **a : List[str] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : Dict , **a : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : List[str] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : Dict , *a : str , **a : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Any , *a : Any , **a : Any ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Any , *a : List[Any] , **a : str ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=__magic_name__ ):
lowercase = ['torch', 'transformers', 'onnx']
def __init__( self : str , *a : Union[str, Any] , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : int , *a : Union[str, Any] , **a : Dict ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *a : Tuple , **a : List[str] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
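# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): dummy classes like
# those above stand in for optional pipelines so imports always succeed;
# constructing one raises an ImportError naming the required backends
# (torch, transformers, onnx) whenever any of them is unavailable.
def _demo_dummy_object() -> None:
    try:
        A__()  # any of the dummy classes defined above
    except ImportError as err:
        print(err)  # lists the backends that must be installed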
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/config.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/config.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/config.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/config.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/config.json""",
}
class T5Config(PretrainedConfig):
    model_type = 't5'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
    def __init__(
        self,
        vocab_size=32_128,
        d_model=512,
        d_kv=64,
        d_ff=2_048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj='relu',
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split('-')
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == 'gated'
        if len(act_info) > 1 and act_info[0] != 'gated' or len(act_info) > 2:
            raise ValueError(
                f'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. '
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                "'gated-gelu' or 'relu'")
        # for backwards compatibility
        if feed_forward_proj == 'gated-gelu':
            self.dense_act_fn = 'gelu_new'
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs)
class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            'input_ids': {0: 'batch', 1: 'encoder_sequence'},
            'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
        }
        if self.use_past:
            common_inputs['attention_mask'][1] = 'past_encoder_sequence + sequence'
            common_inputs['decoder_input_ids'] = {0: 'batch'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
        return common_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
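# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the
# `feed_forward_proj` parsing performed in the constructor above, including
# the backwards-compatibility remap of 'gated-gelu' to 'gelu_new'. Defined
# but never called at import time.
def _demo_feed_forward_proj() -> None:
    cfg = T5Config(feed_forward_proj='gated-gelu')
    assert cfg.is_gated_act
    assert cfg.dense_act_fn == 'gelu_new'
    cfg = T5Config(feed_forward_proj='relu')
    assert not cfg.is_gated_act
    assert cfg.dense_act_fn == 'relu'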
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class A__ :
def __init__( self : List[str] , a : Any , a : Dict=13 , a : Optional[Any]=7 , a : Tuple=True , a : Tuple=True , a : Dict=False , a : Optional[Any]=True , a : Dict=99 , a : Tuple=32 , a : Optional[Any]=5 , a : str=4 , a : Union[str, Any]=37 , a : Any="gelu" , a : Dict=0.1 , a : Any=0.1 , a : Optional[int]=512 , a : Union[str, Any]=16 , a : Optional[int]=2 , a : Optional[Any]=0.0_2 , a : List[Any]=3 , a : Any=4 , a : Optional[int]=None , ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = parent
lowerCAmelCase__ : str = batch_size
lowerCAmelCase__ : Optional[int] = seq_length
lowerCAmelCase__ : Optional[Any] = is_training
lowerCAmelCase__ : Tuple = use_input_mask
lowerCAmelCase__ : List[Any] = use_token_type_ids
lowerCAmelCase__ : str = use_labels
lowerCAmelCase__ : Dict = vocab_size
lowerCAmelCase__ : Union[str, Any] = hidden_size
lowerCAmelCase__ : Optional[int] = num_hidden_layers
lowerCAmelCase__ : List[Any] = num_attention_heads
lowerCAmelCase__ : int = intermediate_size
lowerCAmelCase__ : Union[str, Any] = hidden_act
lowerCAmelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCAmelCase__ : Any = attention_probs_dropout_prob
lowerCAmelCase__ : Dict = max_position_embeddings
lowerCAmelCase__ : int = type_vocab_size
lowerCAmelCase__ : int = type_sequence_label_size
lowerCAmelCase__ : Dict = initializer_range
lowerCAmelCase__ : List[str] = num_labels
lowerCAmelCase__ : Any = num_choices
lowerCAmelCase__ : List[Any] = scope
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : Tuple = None
if self.use_input_mask:
lowerCAmelCase__ : str = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : List[str] = None
if self.use_token_type_ids:
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ : Optional[Any] = None
lowerCAmelCase__ : Dict = None
lowerCAmelCase__ : str = None
if self.use_labels:
lowerCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self : Tuple , a : Dict , a : List[str] , a : str , a : Union[str, Any] , a : Optional[Any] , a : Dict , a : str ):
'''simple docstring'''
lowerCAmelCase__ : str = LlamaModel(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Dict = model(a , attention_mask=a )
lowerCAmelCase__ : Union[str, Any] = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : int , a : Any , a : Union[str, Any] , a : Dict , a : Dict , a : List[Any] , a : Optional[Any] , a : int , a : Dict , a : Tuple , ):
'''simple docstring'''
lowerCAmelCase__ : int = True
lowerCAmelCase__ : Dict = LlamaModel(a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[Any] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , )
lowerCAmelCase__ : Optional[int] = model(
a , attention_mask=a , encoder_hidden_states=a , )
lowerCAmelCase__ : Union[str, Any] = model(a , attention_mask=a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : Union[str, Any] , a : int , a : List[Any] , a : int , a : Tuple , a : List[Any] , a : Union[str, Any] , a : Any , a : List[str] , a : List[str] , ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = LlamaForCausalLM(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Tuple = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : str , a : Any , a : Tuple , a : str , a : Union[str, Any] , a : Optional[Any] , a : List[Any] , a : Optional[Any] , a : Optional[Any] , a : List[str] , ):
'''simple docstring'''
lowerCAmelCase__ : str = True
lowerCAmelCase__ : Optional[int] = True
lowerCAmelCase__ : List[Any] = LlamaForCausalLM(config=a )
model.to(a )
model.eval()
# first forward pass
lowerCAmelCase__ : List[str] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , use_cache=a , )
lowerCAmelCase__ : List[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase__ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase__ : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCAmelCase__ : int = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase__ : Any = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase__ : Union[str, Any] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , output_hidden_states=a , )['hidden_states'][0]
lowerCAmelCase__ : Union[str, Any] = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , past_key_values=a , output_hidden_states=a , )['hidden_states'][0]
# select random slice
lowerCAmelCase__ : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase__ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase__ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1E-3 ) )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
(
(
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) ,
) : Any = config_and_inputs
lowerCAmelCase__ : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
lowercase = (LlamaForCausalLM,) if is_torch_available() else ()
lowercase = (
{
'feature-extraction': LlamaModel,
'text-classification': LlamaForSequenceClassification,
'text-generation': LlamaForCausalLM,
'zero-shot': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = LlamaModelTester(self )
lowerCAmelCase__ : str = ConfigTester(self , config_class=a , hidden_size=37 )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase__ : int = type
self.model_tester.create_and_check_model(*a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : int = 3
lowerCAmelCase__ : Dict = input_dict['input_ids']
lowerCAmelCase__ : Optional[Any] = input_ids.ne(1 ).to(a )
lowerCAmelCase__ : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase__ : Tuple = LlamaForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : str = model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : List[Any] = 3
lowerCAmelCase__ : List[str] = 'single_label_classification'
lowerCAmelCase__ : List[Any] = input_dict['input_ids']
lowerCAmelCase__ : List[Any] = input_ids.ne(1 ).to(a )
lowerCAmelCase__ : Optional[int] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase__ : int = LlamaForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Optional[int] = model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Optional[Any] = 3
lowerCAmelCase__ : Optional[Any] = 'multi_label_classification'
lowerCAmelCase__ : List[str] = input_dict['input_ids']
lowerCAmelCase__ : Tuple = input_ids.ne(1 ).to(a )
lowerCAmelCase__ : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase__ : Dict = LlamaForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Dict = model(a , attention_mask=a , labels=a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def _lowerCamelCase ( self : Optional[int] , a : Dict ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Tuple = ids_tensor([1, 10] , config.vocab_size )
lowerCAmelCase__ : List[str] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase__ : List[Any] = LlamaModel(a )
original_model.to(a )
original_model.eval()
lowerCAmelCase__ : List[Any] = original_model(a ).last_hidden_state
lowerCAmelCase__ : str = original_model(a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase__ : Any = {'type': scaling_type, 'factor': 1_0.0}
lowerCAmelCase__ : Union[str, Any] = LlamaModel(a )
scaled_model.to(a )
scaled_model.eval()
lowerCAmelCase__ : Union[str, Any] = scaled_model(a ).last_hidden_state
lowerCAmelCase__ : Optional[int] = scaled_model(a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(a , a , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(a , a , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(a , a , atol=1E-5 ) )
@require_torch
class A__ ( unittest.TestCase ):
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
lowerCAmelCase__ : Any = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
lowerCAmelCase__ : Dict = torch.tensor([[-6.6_5_5_0, -4.1_2_2_7, -4.9_8_5_9, -3.2_4_0_6, 0.8_2_6_2, -3.0_0_3_3, 1.2_9_6_4, -3.3_6_9_9]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : List[Any] = torch.tensor([-1_2.8_2_8_1, -7.4_4_5_3, -0.4_6_3_9, -8.0_6_2_5, -7.2_5_0_0, -8.0_0_0_0, -6.4_8_8_3, -7.7_6_9_5, -7.8_4_3_8, -7.0_3_1_2, -6.2_1_8_8, -7.1_3_2_8, -1.8_4_9_6, 1.9_9_6_1, -8.6_2_5_0, -6.7_2_2_7, -1_2.8_2_8_1, -6.9_4_9_2, -7.0_7_4_2, -7.7_8_5_2, -7.5_8_2_0, -7.9_0_6_2, -6.9_3_7_5, -7.9_8_0_5, -8.3_4_3_8, -8.1_5_6_2, -8.0_4_6_9, -7.6_2_5_0, -7.7_4_2_2, -7.3_3_9_8,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : Optional[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
lowerCAmelCase__ : List[str] = model(torch.tensor(a ) )
# Expected mean on dim = -1
lowerCAmelCase__ : Union[str, Any] = torch.tensor([[-2.0_6_2_2, -1.2_7_9_4, -1.1_6_3_8, -0.9_7_8_8, -1.4_6_0_3, -1.0_2_3_8, -1.7_8_9_3, -1.4_4_1_1]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : Any = torch.tensor([-8.1_4_0_6, -8.0_5_4_7, 2.7_4_6_1, -1.2_3_4_4, -0.1_4_4_8, -1.8_2_6_2, -1.0_0_2_0, -1.8_1_5_4, -1.6_8_9_5, -1.8_5_1_6, -2.3_5_7_4, -0.9_2_7_7, 3.7_5_9_8, 6.5_7_4_2, -1.2_9_9_8, -0.1_1_7_7, -8.1_4_0_6, -2.9_6_8_8, -2.9_1_9_9, -3.1_6_9_9, -3.5_2_5_4, -2.3_5_5_5, -2.7_9_8_8, -3.4_1_4_1, -2.8_2_6_2, -4.5_1_9_5, -3.3_3_7_9, -3.3_1_6_4, -2.7_8_3_2, -3.0_2_7_3] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : int = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : Optional[int] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
lowerCAmelCase__ : str = model(torch.tensor(a ) )
# Expected mean on dim = -1
lowerCAmelCase__ : str = torch.tensor([[-0.8_5_6_2, -1.8_5_2_0, -0.7_5_5_1, -0.4_1_6_2, -1.5_1_6_1, -1.2_0_3_8, -2.4_8_2_3, -2.3_2_5_4]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : List[str] = torch.tensor([-2.2_2_2_7, 4.8_8_2_8, 0.9_0_2_3, -0.4_5_7_8, -0.7_8_7_1, -0.1_0_3_3, -0.6_2_2_1, -0.5_7_8_6, -0.7_8_0_3, -1.0_6_7_4, -1.2_9_2_0, -0.1_5_7_0, 0.8_0_0_8, 2.0_7_2_3, -0.9_4_9_7, 0.2_7_7_1, -2.2_2_2_7, -0.7_6_1_2, -1.4_3_4_6, -1.2_0_6_1, -1.6_4_2_6, -0.3_0_0_0, -0.7_1_3_9, -1.1_9_3_4, -1.8_6_9_1, -1.6_9_7_3, -1.5_9_4_7, -1.2_7_0_5, -0.3_5_2_3, -0.5_5_1_3] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
lowerCAmelCase__ : List[str] = model(torch.tensor(a ) )
lowerCAmelCase__ : int = torch.tensor(
[[-4.2_3_2_7, -3.3_3_6_0, -4.6_6_6_5, -4.7_6_3_1, -1.8_1_8_0, -3.4_1_7_0, -1.4_2_1_1, -3.1_8_1_0]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# fmt: off
lowerCAmelCase__ : Optional[int] = torch.tensor([-9.4_9_2_2, -3.9_5_5_1, 1.7_9_9_8, -5.6_7_5_8, -5.1_0_5_5, -5.8_9_8_4, -4.8_3_2_0, -6.8_0_8_6, -6.5_3_9_1, -5.6_1_7_2, -5.5_8_2_0, -5.5_3_5_2, 1.7_8_8_1, 3.6_2_8_9, -6.5_1_1_7, -3.4_7_8_5, -9.5_0_0_0, -6.0_3_5_2, -6.8_1_2_5, -6.0_1_9_5, -6.6_8_3_6, -5.4_7_2_7, -6.2_8_1_2, -6.0_3_9_1, -7.3_3_9_8, -7.4_2_9_7, -7.4_8_4_4, -6.5_8_2_0, -5.8_7_8_9, -5.5_3_1_2] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
    @unittest.skip('Model is currently gated' )
@slow
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
lowerCAmelCase__ : Tuple = 'Simply put, the theory of relativity states that '
lowerCAmelCase__ : Dict = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
lowerCAmelCase__ : Dict = tokenizer.encode(a , return_tensors='pt' )
lowerCAmelCase__ : str = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=a )
# greedy generation outputs
lowerCAmelCase__ : Optional[Any] = model.generate(a , max_new_tokens=64 , top_p=a , temperature=1 , do_sample=a )
lowerCAmelCase__ : Tuple = tokenizer.decode(generated_ids[0] , skip_special_tokens=a )
self.assertEqual(a , a )
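# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original tests): the `rope_scaling`
# dict exercised by the parameterized scaling test above. The tiny sizes are
# assumptions so the model builds quickly on CPU; defined but never invoked
# by the test suite.
def _demo_rope_scaling() -> None:
    config = LlamaConfig(
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        rope_scaling={'type': 'dynamic', 'factor': 2.0},
    )
    model = LlamaModel(config)
    print(sum(p.numel() for p in model.parameters()))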
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class A__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = IFPipeline
lowercase = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
lowercase = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase = PipelineTesterMixin.required_optional_params - {'latents'}
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return self._get_dummy_components()
def _lowerCamelCase ( self : Tuple , a : Any , a : List[Any]=0 ):
'''simple docstring'''
if str(a ).startswith('mps' ):
lowerCAmelCase__ : Optional[int] = torch.manual_seed(a )
else:
lowerCAmelCase__ : Union[str, Any] = torch.Generator(device=a ).manual_seed(a )
lowerCAmelCase__ : Dict = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def _lowerCamelCase ( self : int ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
self._test_save_load_local()
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : int = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.floataa )
lowerCAmelCase__ : List[Any] = IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.floataa , text_encoder=a , tokenizer=a )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
lowerCAmelCase__ : Dict = pipe_a.encode_prompt('anime turtle' , device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
lowerCAmelCase__ : str = None
lowerCAmelCase__ : Dict = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(a , a , a , a )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
lowerCAmelCase__ : Dict = IFImgaImgPipeline(**pipe_a.components )
lowerCAmelCase__ : Dict = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(a , a , a , a )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
lowerCAmelCase__ : Optional[int] = IFInpaintingPipeline(**pipe_a.components )
lowerCAmelCase__ : Tuple = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(a , a , a , a )
def _lowerCamelCase ( self : str , a : Any , a : Optional[Any] , a : Any , a : List[str] ):
'''simple docstring'''
_start_torch_memory_measurement()
lowerCAmelCase__ : int = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase__ : str = pipe_a(
prompt_embeds=a , negative_prompt_embeds=a , num_inference_steps=2 , generator=a , output_type='np' , )
lowerCAmelCase__ : List[Any] = output.images[0]
assert image.shape == (64, 64, 3)
lowerCAmelCase__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
lowerCAmelCase__ : Union[str, Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(a , a )
# pipeline 2
_start_torch_memory_measurement()
lowerCAmelCase__ : List[str] = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase__ : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a )
lowerCAmelCase__ : List[str] = pipe_a(
prompt_embeds=a , negative_prompt_embeds=a , image=a , generator=a , num_inference_steps=2 , output_type='np' , )
lowerCAmelCase__ : Tuple = output.images[0]
assert image.shape == (256, 256, 3)
lowerCAmelCase__ : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowerCAmelCase__ : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(a , a )
def _lowerCamelCase ( self : Tuple , a : List[Any] , a : int , a : Optional[Any] , a : str ):
'''simple docstring'''
_start_torch_memory_measurement()
lowerCAmelCase__ : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a )
lowerCAmelCase__ : Tuple = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase__ : str = pipe_a(
prompt_embeds=a , negative_prompt_embeds=a , image=a , num_inference_steps=2 , generator=a , output_type='np' , )
lowerCAmelCase__ : str = output.images[0]
assert image.shape == (64, 64, 3)
lowerCAmelCase__ : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
lowerCAmelCase__ : Union[str, Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(a , a )
# pipeline 2
_start_torch_memory_measurement()
lowerCAmelCase__ : Any = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase__ : Dict = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(a )
lowerCAmelCase__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a )
lowerCAmelCase__ : Optional[Any] = pipe_a(
prompt_embeds=a , negative_prompt_embeds=a , image=a , original_image=a , generator=a , num_inference_steps=2 , output_type='np' , )
lowerCAmelCase__ : List[str] = output.images[0]
assert image.shape == (256, 256, 3)
lowerCAmelCase__ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowerCAmelCase__ : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(a , a )
def _lowerCamelCase ( self : List[Any] , a : List[str] , a : Any , a : List[Any] , a : int ):
'''simple docstring'''
_start_torch_memory_measurement()
lowerCAmelCase__ : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a )
lowerCAmelCase__ : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(a )
lowerCAmelCase__ : Union[str, Any] = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe_a(
prompt_embeds=a , negative_prompt_embeds=a , image=a , mask_image=a , num_inference_steps=2 , generator=a , output_type='np' , )
lowerCAmelCase__ : Optional[Any] = output.images[0]
assert image.shape == (64, 64, 3)
lowerCAmelCase__ : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
lowerCAmelCase__ : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(a , a )
# pipeline 2
_start_torch_memory_measurement()
lowerCAmelCase__ : Any = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase__ : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a )
lowerCAmelCase__ : List[str] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(a )
lowerCAmelCase__ : List[Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(a )
lowerCAmelCase__ : List[str] = pipe_a(
prompt_embeds=a , negative_prompt_embeds=a , image=a , mask_image=a , original_image=a , generator=a , num_inference_steps=2 , output_type='np' , )
lowerCAmelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (256, 256, 3)
lowerCAmelCase__ : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowerCAmelCase__ : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(a , a )
def lowerCAmelCase__ ( ) -> Dict:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
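# Usage sketch for the measurement helper above: reset the CUDA allocator
# statistics, run a workload, then read back the peak allocation. The
# workload and the 2 GB bound below are assumptions for illustration, not
# values from the tests; requires a CUDA-capable build of PyTorch.
import torch

def measure_peak_memory(fn) -> int:
    """Run `fn` and return the peak CUDA memory allocated, in bytes."""
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
    fn()
    return torch.cuda.max_memory_allocated()

if torch.cuda.is_available():
    def workload():
        x = torch.randn(4_096, 4_096, device='cuda')
        (x @ x).sum().item()
    peak = measure_peak_memory(workload)
    assert peak < 2 * 10**9  # example bound, analogous to the asserts above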
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class A__ ( __magic_name__ ):
lowercase = 'unispeech'
def __init__( self : Any , a : List[Any]=32 , a : List[Any]=768 , a : Any=12 , a : List[str]=12 , a : List[Any]=3_072 , a : Any="gelu" , a : Dict=0.1 , a : List[str]=0.1 , a : List[str]=0.1 , a : Union[str, Any]=0.0 , a : str=0.0 , a : int=0.1 , a : List[str]=0.1 , a : List[Any]=0.0_2 , a : Optional[int]=1E-5 , a : Optional[int]="group" , a : Optional[Any]="gelu" , a : List[Any]=(512, 512, 512, 512, 512, 512, 512) , a : Optional[Any]=(5, 2, 2, 2, 2, 2, 2) , a : List[str]=(10, 3, 3, 3, 3, 2, 2) , a : Union[str, Any]=False , a : Union[str, Any]=128 , a : Tuple=16 , a : Dict=False , a : str=True , a : str=0.0_5 , a : Union[str, Any]=10 , a : Tuple=2 , a : int=0.0 , a : Optional[Any]=10 , a : List[str]=0 , a : str=320 , a : List[str]=2 , a : Optional[Any]=0.1 , a : Any=100 , a : Dict=256 , a : Any=256 , a : Dict=0.1 , a : List[Any]="mean" , a : Dict=False , a : str=False , a : Optional[int]=256 , a : Any=80 , a : List[Any]=0 , a : Optional[int]=1 , a : int=2 , a : List[Any]=0.5 , **a : int , ):
'''simple docstring'''
super().__init__(**a , pad_token_id=a , bos_token_id=a , eos_token_id=a )
lowerCAmelCase__ : List[str] = hidden_size
lowerCAmelCase__ : List[str] = feat_extract_norm
lowerCAmelCase__ : Optional[Any] = feat_extract_activation
lowerCAmelCase__ : str = list(a )
lowerCAmelCase__ : List[str] = list(a )
lowerCAmelCase__ : Tuple = list(a )
lowerCAmelCase__ : Dict = conv_bias
lowerCAmelCase__ : Optional[int] = num_conv_pos_embeddings
lowerCAmelCase__ : Any = num_conv_pos_embedding_groups
lowerCAmelCase__ : str = len(self.conv_dim )
lowerCAmelCase__ : Any = num_hidden_layers
lowerCAmelCase__ : Dict = intermediate_size
lowerCAmelCase__ : Dict = hidden_act
lowerCAmelCase__ : Union[str, Any] = num_attention_heads
lowerCAmelCase__ : Union[str, Any] = hidden_dropout
lowerCAmelCase__ : Tuple = attention_dropout
lowerCAmelCase__ : str = activation_dropout
lowerCAmelCase__ : Any = feat_proj_dropout
lowerCAmelCase__ : List[Any] = final_dropout
lowerCAmelCase__ : Tuple = layerdrop
lowerCAmelCase__ : Any = layer_norm_eps
lowerCAmelCase__ : Dict = initializer_range
lowerCAmelCase__ : Optional[Any] = num_ctc_classes
lowerCAmelCase__ : Tuple = vocab_size
lowerCAmelCase__ : Dict = do_stable_layer_norm
lowerCAmelCase__ : List[Any] = use_weighted_layer_sum
lowerCAmelCase__ : Any = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase__ : Union[str, Any] = apply_spec_augment
lowerCAmelCase__ : Any = mask_time_prob
lowerCAmelCase__ : Dict = mask_time_length
lowerCAmelCase__ : Tuple = mask_time_min_masks
lowerCAmelCase__ : Optional[int] = mask_feature_prob
lowerCAmelCase__ : Optional[Any] = mask_feature_length
lowerCAmelCase__ : int = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCAmelCase__ : int = num_codevectors_per_group
lowerCAmelCase__ : Any = num_codevector_groups
lowerCAmelCase__ : Any = contrastive_logits_temperature
lowerCAmelCase__ : int = feat_quantizer_dropout
lowerCAmelCase__ : List[Any] = num_negatives
lowerCAmelCase__ : List[str] = codevector_dim
lowerCAmelCase__ : Optional[int] = proj_codevector_dim
lowerCAmelCase__ : Dict = diversity_loss_weight
# ctc loss
lowerCAmelCase__ : Any = ctc_loss_reduction
lowerCAmelCase__ : Any = ctc_zero_infinity
# pretraining loss
lowerCAmelCase__ : Union[str, Any] = replace_prob
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
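# Illustration of what the property above computes (upstream transformers
# calls it `inputs_to_logits_ratio`): the overall downsampling factor of the
# convolutional feature extractor is the product of the per-layer strides.
# Pure Python; the stride tuple is the default from the config above.
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)
downsampling = functools.reduce(operator.mul, conv_stride, 1)
assert downsampling == 320  # 5 * 2**6
# So one output frame covers roughly 320 input samples (~20 ms at 16 kHz).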
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""tanreinama/GPTSAN-2.8B-spout_is_uniform""": (
"""https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"""
),
}
class A__ ( __magic_name__ ):
lowercase = 'gptsan-japanese'
lowercase = [
'past_key_values',
]
lowercase = {
'hidden_size': 'd_model',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : Union[str, Any] , a : str=36_000 , a : Optional[Any]=1_280 , a : Union[str, Any]=1_024 , a : Any=8_192 , a : Dict=4_096 , a : List[str]=128 , a : int=10 , a : Dict=0 , a : int=16 , a : Dict=16 , a : str=128 , a : int=0.0 , a : Optional[int]=1E-5 , a : Union[str, Any]=False , a : Any=0.0 , a : Any="float32" , a : List[str]=False , a : Optional[int]=False , a : Dict=False , a : Dict=0.0_0_2 , a : str=False , a : List[str]=True , a : Dict=35_998 , a : Union[str, Any]=35_995 , a : Dict=35_999 , **a : Union[str, Any] , ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = vocab_size
lowerCAmelCase__ : Union[str, Any] = max_position_embeddings
lowerCAmelCase__ : Tuple = d_model
lowerCAmelCase__ : Optional[Any] = d_ff
lowerCAmelCase__ : str = d_ext
lowerCAmelCase__ : str = d_spout
lowerCAmelCase__ : Any = num_switch_layers
lowerCAmelCase__ : List[Any] = num_ext_layers
lowerCAmelCase__ : List[str] = num_switch_layers + num_ext_layers
lowerCAmelCase__ : Union[str, Any] = num_heads
lowerCAmelCase__ : Dict = num_experts
lowerCAmelCase__ : int = expert_capacity
lowerCAmelCase__ : List[str] = dropout_rate
lowerCAmelCase__ : List[Any] = layer_norm_epsilon
lowerCAmelCase__ : Dict = router_bias
lowerCAmelCase__ : Union[str, Any] = router_jitter_noise
lowerCAmelCase__ : Tuple = router_dtype
lowerCAmelCase__ : Tuple = router_ignore_padding_tokens
lowerCAmelCase__ : Optional[int] = output_hidden_states
lowerCAmelCase__ : Union[str, Any] = output_attentions
lowerCAmelCase__ : str = initializer_factor
lowerCAmelCase__ : Optional[Any] = output_router_logits
lowerCAmelCase__ : Optional[Any] = use_cache
super().__init__(
separator_token_id=a , pad_token_id=a , eos_token_id=a , **a , )
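# Minimal sketch of the `attribute_map` aliasing the config above relies on:
# generic names such as `hidden_size` resolve to model-specific fields such
# as `d_model`. This is a simplified stand-in for illustration, not the
# actual transformers PretrainedConfig machinery.
class ConfigWithAliases:
    attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads'}

    def __init__(self, d_model: int = 1_024, num_heads: int = 16):
        self.d_model = d_model
        self.num_heads = num_heads

    def __getattr__(self, name):
        # Reached only when normal attribute lookup fails; follow the alias.
        alias = type(self).attribute_map.get(name)
        if alias is not None:
            return getattr(self, alias)
        raise AttributeError(name)

cfg = ConfigWithAliases()
assert cfg.hidden_size == cfg.d_model == 1_024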
import torch
from torch import nn
class A__ ( nn.Module ):
def __init__( self : Optional[int] , a : Union[str, Any] , a : str , a : str , a : List[Any] , a : List[Any]=1 , a : Tuple=False ):
'''simple docstring'''
super().__init__()
lowerCAmelCase__ : Dict = n_token
lowerCAmelCase__ : Any = d_embed
lowerCAmelCase__ : str = d_proj
lowerCAmelCase__ : int = cutoffs + [n_token]
lowerCAmelCase__ : Union[str, Any] = [0] + self.cutoffs
lowerCAmelCase__ : str = div_val
lowerCAmelCase__ : Tuple = self.cutoffs[0]
lowerCAmelCase__ : Dict = len(self.cutoffs ) - 1
lowerCAmelCase__ : Any = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
lowerCAmelCase__ : int = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
lowerCAmelCase__ : Optional[Any] = nn.Parameter(torch.zeros(self.n_clusters ) )
lowerCAmelCase__ : Optional[int] = nn.ModuleList()
lowerCAmelCase__ : Tuple = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(a , a ) ) )
else:
self.out_projs.append(a )
self.out_layers.append(nn.Linear(a , a ) )
else:
for i in range(len(self.cutoffs ) ):
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : Optional[Any] = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(a , a ) ) )
self.out_layers.append(nn.Linear(a , r_idx - l_idx ) )
lowerCAmelCase__ : Tuple = keep_order
def _lowerCamelCase ( self : Optional[int] , a : List[str] , a : int , a : List[str] , a : str ):
'''simple docstring'''
if proj is None:
lowerCAmelCase__ : Tuple = nn.functional.linear(a , a , bias=a )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
lowerCAmelCase__ : int = nn.functional.linear(a , proj.t().contiguous() )
lowerCAmelCase__ : Tuple = nn.functional.linear(a , a , bias=a )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def _lowerCamelCase ( self : List[str] , a : List[Any] , a : Optional[int]=None , a : Tuple=False ):
'''simple docstring'''
if labels is not None:
# Shift so that tokens < n predict n
lowerCAmelCase__ : str = hidden[..., :-1, :].contiguous()
lowerCAmelCase__ : Optional[Any] = labels[..., 1:].contiguous()
lowerCAmelCase__ : List[Any] = hidden.view(-1 , hidden.size(-1 ) )
lowerCAmelCase__ : Tuple = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
else:
lowerCAmelCase__ : Optional[Any] = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
lowerCAmelCase__ : Optional[Any] = self._compute_logit(a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
lowerCAmelCase__ : str = labels != -100
lowerCAmelCase__ : int = torch.zeros_like(a , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase__ : List[str] = (
-nn.functional.log_softmax(a , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
lowerCAmelCase__ : Optional[Any] = nn.functional.log_softmax(a , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase__ , lowerCAmelCase__ : int = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : Any = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase__ : Any = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase__ : Optional[Any] = self.out_layers[i].weight
lowerCAmelCase__ : Optional[int] = self.out_layers[i].bias
if i == 0:
lowerCAmelCase__ : Dict = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase__ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(a )
biases.append(a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase__ : List[Any] = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : Union[str, Any] = nn.functional.log_softmax(a , dim=1 )
if labels is None:
lowerCAmelCase__ : Tuple = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
lowerCAmelCase__ : Dict = torch.zeros_like(a , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase__ : Tuple = 0
lowerCAmelCase__ : Union[str, Any] = [0] + self.cutoffs
for i in range(len(a ) - 1 ):
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
lowerCAmelCase__ : Tuple = (labels >= l_idx) & (labels < r_idx)
lowerCAmelCase__ : int = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
lowerCAmelCase__ : Tuple = labels.index_select(0 , a ) - l_idx
lowerCAmelCase__ : Any = head_logprob.index_select(0 , a )
lowerCAmelCase__ : Optional[int] = hidden.index_select(0 , a )
else:
lowerCAmelCase__ : Any = hidden
if i == 0:
if labels is not None:
lowerCAmelCase__ : Union[str, Any] = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase__ : List[str] = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase__ : Union[str, Any] = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : Optional[int] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : List[Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
lowerCAmelCase__ : List[str] = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase__ : Tuple = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
lowerCAmelCase__ : Union[str, Any] = logprob_i
if labels is not None:
if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order:
out.index_copy_(0 , a , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def _lowerCamelCase ( self : List[Any] , a : Any ):
'''simple docstring'''
if self.n_clusters == 0:
lowerCAmelCase__ : Union[str, Any] = self._compute_logit(a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(a , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase__ , lowerCAmelCase__ : str = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : str = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase__ : Dict = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase__ : int = self.out_layers[i].weight
lowerCAmelCase__ : int = self.out_layers[i].bias
if i == 0:
lowerCAmelCase__ : Optional[int] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase__ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(a )
biases.append(a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase__ : Dict = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : List[Any] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
lowerCAmelCase__ : Optional[Any] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : List[Any] = [0] + self.cutoffs
for i in range(len(a ) - 1 ):
lowerCAmelCase__ , lowerCAmelCase__ : str = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
lowerCAmelCase__ : Union[str, Any] = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase__ : Dict = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : List[str] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : Dict = head_logprob[:, -i] + tail_logprob_i
lowerCAmelCase__ : List[str] = logprob_i
return out
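# The class above is a hand-rolled projected adaptive log-softmax over
# vocabulary clusters. For reference, PyTorch ships the core idea as a
# ready-made module; this is an independent usage sketch with made-up sizes,
# not a drop-in replacement (the class above adds projections, `div_val`
# weight sharing and -100 label masking on top).
import torch
from torch import nn

adaptive = nn.AdaptiveLogSoftmaxWithLoss(
    in_features=64,        # hidden size
    n_classes=10_000,      # vocabulary size
    cutoffs=[100, 1_000],  # boundaries between head and tail clusters
    div_value=4.0,         # shrink factor for tail projection sizes
)
hidden = torch.randn(8, 64)
labels = torch.randint(0, 10_000, (8,))
result = adaptive(hidden, labels)
print(result.output.shape, result.loss.item())  # per-example target log-probs, mean NLL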
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""kakaobrain/align-base""": """https://huggingface.co/kakaobrain/align-base/resolve/main/config.json""",
}
class A__ ( __magic_name__ ):
lowercase = 'align_text_model'
def __init__( self : int , a : Dict=30_522 , a : int=768 , a : Tuple=12 , a : List[Any]=12 , a : List[Any]=3_072 , a : Optional[int]="gelu" , a : List[str]=0.1 , a : Any=0.1 , a : Optional[int]=512 , a : Any=2 , a : Union[str, Any]=0.0_2 , a : Any=1E-12 , a : Optional[int]=0 , a : Dict="absolute" , a : Dict=True , **a : Union[str, Any] , ):
'''simple docstring'''
super().__init__(**a )
lowerCAmelCase__ : str = vocab_size
lowerCAmelCase__ : Optional[int] = hidden_size
lowerCAmelCase__ : int = num_hidden_layers
lowerCAmelCase__ : int = num_attention_heads
lowerCAmelCase__ : Any = hidden_act
lowerCAmelCase__ : Optional[Any] = intermediate_size
lowerCAmelCase__ : Optional[int] = hidden_dropout_prob
lowerCAmelCase__ : Any = attention_probs_dropout_prob
lowerCAmelCase__ : Dict = max_position_embeddings
lowerCAmelCase__ : Tuple = type_vocab_size
lowerCAmelCase__ : Optional[int] = initializer_range
lowerCAmelCase__ : Optional[Any] = layer_norm_eps
lowerCAmelCase__ : str = position_embedding_type
lowerCAmelCase__ : List[str] = use_cache
lowerCAmelCase__ : Optional[Any] = pad_token_id
@classmethod
def _lowerCamelCase ( cls : List[str] , a : Union[str, os.PathLike] , **a : Tuple ):
'''simple docstring'''
cls._set_token_in_kwargs(a )
lowerCAmelCase__ : Optional[Any] = cls.get_config_dict(a , **a )
# get the text config dict if we are loading from AlignConfig
if config_dict.get('model_type' ) == "align":
lowerCAmelCase__ : str = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(a , **a )
class A__ ( __magic_name__ ):
lowercase = 'align_vision_model'
def __init__( self : Optional[Any] , a : int = 3 , a : int = 600 , a : float = 2.0 , a : float = 3.1 , a : int = 8 , a : List[int] = [3, 3, 5, 3, 5, 5, 3] , a : List[int] = [32, 16, 24, 40, 80, 112, 192] , a : List[int] = [16, 24, 40, 80, 112, 192, 320] , a : List[int] = [] , a : List[int] = [1, 2, 2, 2, 1, 2, 1] , a : List[int] = [1, 2, 2, 3, 3, 4, 1] , a : List[int] = [1, 6, 6, 6, 6, 6, 6] , a : float = 0.2_5 , a : str = "swish" , a : int = 2_560 , a : str = "mean" , a : float = 0.0_2 , a : float = 0.0_0_1 , a : float = 0.9_9 , a : float = 0.2 , **a : Tuple , ):
'''simple docstring'''
super().__init__(**a )
lowerCAmelCase__ : int = num_channels
lowerCAmelCase__ : Tuple = image_size
lowerCAmelCase__ : Optional[int] = width_coefficient
lowerCAmelCase__ : str = depth_coefficient
lowerCAmelCase__ : List[Any] = depth_divisor
lowerCAmelCase__ : str = kernel_sizes
lowerCAmelCase__ : List[Any] = in_channels
lowerCAmelCase__ : Union[str, Any] = out_channels
lowerCAmelCase__ : Optional[int] = depthwise_padding
lowerCAmelCase__ : Dict = strides
lowerCAmelCase__ : Any = num_block_repeats
lowerCAmelCase__ : int = expand_ratios
lowerCAmelCase__ : Dict = squeeze_expansion_ratio
lowerCAmelCase__ : int = hidden_act
lowerCAmelCase__ : Dict = hidden_dim
lowerCAmelCase__ : Tuple = pooling_type
lowerCAmelCase__ : List[Any] = initializer_range
lowerCAmelCase__ : List[Any] = batch_norm_eps
lowerCAmelCase__ : Tuple = batch_norm_momentum
lowerCAmelCase__ : Any = drop_connect_rate
lowerCAmelCase__ : int = sum(a ) * 4
@classmethod
def _lowerCamelCase ( cls : List[str] , a : Union[str, os.PathLike] , **a : Dict ):
'''simple docstring'''
cls._set_token_in_kwargs(a )
lowerCAmelCase__ : int = cls.get_config_dict(a , **a )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get('model_type' ) == "align":
lowerCAmelCase__ : Optional[Any] = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(a , **a )
class A__ ( __magic_name__ ):
lowercase = 'align'
lowercase = True
def __init__( self : Tuple , a : str=None , a : List[Any]=None , a : Any=640 , a : Tuple=1.0 , a : Optional[Any]=0.0_2 , **a : Any , ):
'''simple docstring'''
super().__init__(**a )
if text_config is None:
lowerCAmelCase__ : List[Any] = {}
logger.info('text_config is None. Initializing the AlignTextConfig with default values.' )
if vision_config is None:
lowerCAmelCase__ : Optional[Any] = {}
logger.info('vision_config is None. Initializing the AlignVisionConfig with default values.' )
lowerCAmelCase__ : str = AlignTextConfig(**a )
lowerCAmelCase__ : Tuple = AlignVisionConfig(**a )
lowerCAmelCase__ : Dict = projection_dim
lowerCAmelCase__ : Tuple = temperature_init_value
lowerCAmelCase__ : List[Any] = initializer_range
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , a : AlignTextConfig , a : AlignVisionConfig , **a : List[Any] ):
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **a )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = copy.deepcopy(self.__dict__ )
lowerCAmelCase__ : List[Any] = self.text_config.to_dict()
lowerCAmelCase__ : Optional[int] = self.vision_config.to_dict()
lowerCAmelCase__ : Dict = self.__class__.model_type
return output
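# Sketch of the composite-config pattern above: the top-level config owns
# nested sub-configs, and `to_dict` must serialize them rather than leak live
# objects. Simplified stand-in classes for illustration only.
import copy

class TextCfg:
    def __init__(self, vocab_size: int = 30_522):
        self.vocab_size = vocab_size

    def to_dict(self):
        return copy.deepcopy(self.__dict__)

class ComposedCfg:
    model_type = 'align'

    def __init__(self, text_config=None, projection_dim: int = 640):
        self.text_config = TextCfg(**(text_config or {}))
        self.projection_dim = projection_dim

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['text_config'] = self.text_config.to_dict()  # replace the instance
        output['model_type'] = self.model_type
        return output

cfg = ComposedCfg(text_config={'vocab_size': 100})
d = cfg.to_dict()
assert d['text_config']['vocab_size'] == 100 and d['model_type'] == 'align'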
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
lowerCamelCase__ = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("""""", """|""", """|"""),
datarow=DataRow("""""", """|""", """|"""),
padding=1,
with_header_hide=None,
)
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}}
lowerCamelCase__ = [
{
"""type""": """header""",
"""text""": {
"""type""": """plain_text""",
"""text""": F"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
"""emoji""": True,
},
}
]
lowerCamelCase__ = 0
for log in Path().glob("""*.log"""):
lowerCamelCase__ = 0
with open(log, """r""") as f:
for line in f:
lowerCamelCase__ = json.loads(line)
if line.get("""nodeid""", """""") != "":
lowerCamelCase__ = line["""nodeid"""]
if line.get("""duration""", None) is not None:
lowerCamelCase__ = F"""{line["duration"]:.4f}"""
if line.get("""outcome""", """""") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("""_""")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
lowerCamelCase__ = []
log.unlink()
lowerCamelCase__ = """"""
lowerCamelCase__ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
lowerCamelCase__ = []
lowerCamelCase__ = {}
for test in failed_tests:
lowerCamelCase__ = test[0].split("""::""")
lowerCamelCase__ = data[0].split("""/""")[-1]
if data[0] not in filesafailed:
lowerCamelCase__ = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
lowerCamelCase__ = [test[0] for test in failed_table]
lowerCamelCase__ = list(set(files))
# Count number of instances in failed_tests
lowerCamelCase__ = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
lowerCamelCase__ = tabulate(
table,
headers=["""Test Location""", """Num Failed"""],
tablefmt=hf_table_format,
stralign="""right""",
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
lowerCamelCase__ = """Too many failed tests, please see the full report in the Action results."""
lowerCamelCase__ = len(err) + 10
lowerCamelCase__ = message[: 3000 - offset] + F"""\n...\n```\n{err}"""
print(F"""### {message}""")
else:
lowerCamelCase__ = """No failed tests! 🤗"""
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get("""TEST_TYPE""", """""") != "":
from slack_sdk import WebClient
lowerCamelCase__ = WebClient(token=os.environ["""SLACK_API_TOKEN"""])
if message != "No failed tests! 🤗":
lowerCamelCase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": message,
},
}
payload.append(md_report)
lowerCamelCase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": """*For more details:*""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {
"""type""": """plain_text""",
"""text""": """Check Action results""",
"""emoji""": True,
},
"""url""": F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
lowerCamelCase__ = {
"""type""": """context""",
"""elements""": [
{
"""type""": """plain_text""",
"""text""": F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
lowerCamelCase__ = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload)
lowerCamelCase__ = response.data["""ts"""]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
lowerCamelCase__ = """"""
for i, row in enumerate(test_failures):
if row[0] != test_class:
lowerCamelCase__ = row[0]
else:
lowerCamelCase__ = """"""
lowerCamelCase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel="""#accelerate-ci-daily""",
thread_ts=ts,
blocks=[payload],
)
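# Standalone sketch of the custom tabulate format defined at the top of this
# script: a borderless, pipe-separated layout that renders cleanly inside a
# Slack code block. The rows below are invented example data.
from tabulate import DataRow, TableFormat, tabulate

slack_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow('', '|', '|'),
    datarow=DataRow('', '|', '|'),
    padding=1,
    with_header_hide=None,
)
rows = [['tests/test_big_modeling.py', 2], ['tests/test_utils.py', 1]]
print(tabulate(rows, headers=['Test Location', 'Num Failed'], tablefmt=slack_format, stralign='right'))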
def stooge_sort(arr: list) -> list:
    stooge(arr, 0, len(arr) - 1)
    return arr
def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)
        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)
        # Sort the first 2/3 of the elements again
        stooge(arr, i, h - t)
if __name__ == "__main__":
lowerCamelCase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowerCamelCase__ = [int(item) for item in user_input.split(""",""")]
print(stooge_sort(unsorted))
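# Sanity-check sketch for the sort above: compare against Python's built-in
# sorted() on random inputs. Stooge sort runs in O(n^(log 3 / log 1.5)),
# roughly O(n^2.71), so keep test inputs small.
import random

for _ in range(100):
    data = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
    assert stooge_sort(list(data)) == sorted(data)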
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowerCamelCase__ = imread(r"""digital_image_processing/image_data/lena_small.jpg""")
lowerCamelCase__ = cvtColor(img, COLOR_BGR2GRAY)
def lowerCAmelCase__ ( ) -> Dict:
lowerCAmelCase__ : List[Any] = cn.convert_to_negative(SCREAMING_SNAKE_CASE_ )
# assert negative_img array for at least one True
assert negative_img.any()
def lowerCAmelCase__ ( ) -> Optional[Any]:
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(SCREAMING_SNAKE_CASE_ , 110 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def lowerCAmelCase__ ( ) -> Tuple:
lowerCAmelCase__ : str = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def lowerCAmelCase__ ( ) -> Tuple:
lowerCAmelCase__ : Tuple = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
lowerCAmelCase__ : Optional[Any] = canny.canny(SCREAMING_SNAKE_CASE_ )
# assert canny array for at least one True
assert canny_array.any()
def lowerCAmelCase__ ( ) -> Optional[int]:
assert gg.gaussian_filter(SCREAMING_SNAKE_CASE_ , 5 , sigma=0.9 ).all()
def lowerCAmelCase__ ( ) -> Dict:
# laplace diagonals
lowerCAmelCase__ : Union[str, Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
lowerCAmelCase__ : int = conv.img_convolve(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).astype(SCREAMING_SNAKE_CASE_ )
assert res.any()
def lowerCAmelCase__ ( ) -> List[str]:
assert med.median_filter(SCREAMING_SNAKE_CASE_ , 3 ).any()
def lowerCAmelCase__ ( ) -> Any:
lowerCAmelCase__ , lowerCAmelCase__ : str = sob.sobel_filter(SCREAMING_SNAKE_CASE_ )
assert grad.any() and theta.any()
def lowerCAmelCase__ ( ) -> Any:
lowerCAmelCase__ : int = sp.make_sepia(SCREAMING_SNAKE_CASE_ , 20 )
assert sepia.all()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = "digital_image_processing/image_data/lena_small.jpg" ) -> Optional[Any]:
lowerCAmelCase__ : List[Any] = bs.Burkes(imread(SCREAMING_SNAKE_CASE_ , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = "digital_image_processing/image_data/lena_small.jpg" , ) -> Any:
lowerCAmelCase__ : Dict = rs.NearestNeighbour(imread(SCREAMING_SNAKE_CASE_ , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def lowerCAmelCase__ ( ) -> int:
lowerCAmelCase__ : int = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
lowerCAmelCase__ : List[str] = imread(SCREAMING_SNAKE_CASE_ , 0 )
# Test for get_neighbors_pixel function() return not None
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : List[str] = image[x_coordinate][y_coordinate]
lowerCAmelCase__ : Dict = lbp.get_neighbors_pixel(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
lowerCAmelCase__ : List[str] = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
lowerCAmelCase__ : Dict = lbp.local_binary_value(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert lbp_image.any()
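# Hedged re-implementation sketch of the local-binary-pattern computation the
# last test exercises: compare the 8 neighbours of a pixel against the centre
# and pack the comparisons into a byte. Written independently for
# illustration; the repository's lbp module may use a different neighbour
# order or border handling.
import numpy as np

def lbp_value_sketch(image: np.ndarray, x: int, y: int) -> int:
    center = image[x, y]
    offsets = [(-1, -1), (-1, 0), (-1, 1), (0, 1),
               (1, 1), (1, 0), (1, -1), (0, -1)]  # clockwise from top-left
    value = 0
    for bit, (dx, dy) in enumerate(offsets):
        nx, ny = x + dx, y + dy
        if 0 <= nx < image.shape[0] and 0 <= ny < image.shape[1]:
            value |= int(image[nx, ny] >= center) << bit
    return value

demo = np.array([[10, 20, 30], [40, 50, 60], [70, 80, 90]], dtype=np.uint8)
print(lbp_value_sketch(demo, 1, 1))  # neighbours >= 50 each set one bit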
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
lowerCamelCase__ = logging.getLogger(__name__)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
lowerCAmelCase__ : Dict = np.argmax(SCREAMING_SNAKE_CASE_ , axis=1 )
return np.sum(outputs == labels )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
with open(SCREAMING_SNAKE_CASE_ , encoding='utf_8' ) as f:
lowerCAmelCase__ : Dict = csv.reader(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Dict = []
next(SCREAMING_SNAKE_CASE_ ) # skip the first line
for line in tqdm(SCREAMING_SNAKE_CASE_ ):
output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
lowerCAmelCase__ : Dict = []
for dataset in encoded_datasets:
lowerCAmelCase__ : List[str] = len(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
lowerCAmelCase__ : Tuple = np.zeros((n_batch, 2) , dtype=np.intaa )
lowerCAmelCase__ : List[Any] = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa )
lowerCAmelCase__ : Tuple = np.zeros((n_batch,) , dtype=np.intaa )
        for i, (story, conta, contb, mc_label) in enumerate(SCREAMING_SNAKE_CASE_ ):
            lowerCAmelCase__ : Tuple = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
            lowerCAmelCase__ : Any = [start_token] + story[:cap_length] + [delimiter_token] + contb[:cap_length] + [clf_token]
lowerCAmelCase__ : Optional[Any] = with_conta
lowerCAmelCase__ : List[str] = with_conta
lowerCAmelCase__ : List[Any] = len(SCREAMING_SNAKE_CASE_ ) - 1
lowerCAmelCase__ : Tuple = len(SCREAMING_SNAKE_CASE_ ) - 1
lowerCAmelCase__ : Tuple = with_conta
lowerCAmelCase__ : Optional[int] = with_conta
lowerCAmelCase__ : Optional[int] = mc_label
lowerCAmelCase__ : Dict = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(SCREAMING_SNAKE_CASE_ ) for t in all_inputs ) )
return tensor_datasets
def lowerCAmelCase__ ( ) -> int:
lowerCAmelCase__ : int = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=SCREAMING_SNAKE_CASE_ , default='openai-gpt' , help='pretrained model name' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' )
parser.add_argument(
'--output_dir' , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument('--train_dataset' , type=SCREAMING_SNAKE_CASE_ , default='' )
parser.add_argument('--eval_dataset' , type=SCREAMING_SNAKE_CASE_ , default='' )
parser.add_argument('--seed' , type=SCREAMING_SNAKE_CASE_ , default=42 )
parser.add_argument('--num_train_epochs' , type=SCREAMING_SNAKE_CASE_ , default=3 )
parser.add_argument('--train_batch_size' , type=SCREAMING_SNAKE_CASE_ , default=8 )
parser.add_argument('--eval_batch_size' , type=SCREAMING_SNAKE_CASE_ , default=16 )
parser.add_argument('--adam_epsilon' , default=1e-8 , type=SCREAMING_SNAKE_CASE_ , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , type=SCREAMING_SNAKE_CASE_ , default=1 )
parser.add_argument(
'--max_steps' , default=-1 , type=SCREAMING_SNAKE_CASE_ , help=(
'If > 0: set total number of training steps to perform. Override num_train_epochs.'
) , )
parser.add_argument(
'--gradient_accumulation_steps' , type=SCREAMING_SNAKE_CASE_ , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--learning_rate' , type=SCREAMING_SNAKE_CASE_ , default=6.25e-5 )
parser.add_argument('--warmup_steps' , default=0 , type=SCREAMING_SNAKE_CASE_ , help='Linear warmup over warmup_steps.' )
parser.add_argument('--lr_schedule' , type=SCREAMING_SNAKE_CASE_ , default='warmup_linear' )
parser.add_argument('--weight_decay' , type=SCREAMING_SNAKE_CASE_ , default=0.01 )
parser.add_argument('--lm_coef' , type=SCREAMING_SNAKE_CASE_ , default=0.9 )
parser.add_argument('--n_valid' , type=SCREAMING_SNAKE_CASE_ , default=374 )
parser.add_argument('--server_ip' , type=SCREAMING_SNAKE_CASE_ , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=SCREAMING_SNAKE_CASE_ , default='' , help='Can be used for distant debugging.' )
lowerCAmelCase__ : List[str] = parser.parse_args()
print(SCREAMING_SNAKE_CASE_ )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE_ )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
lowerCAmelCase__ : str = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
lowerCAmelCase__ : Dict = torch.cuda.device_count()
logger.info('device: {}, n_gpu {}'.format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
lowerCAmelCase__ : Union[str, Any] = ['_start_', '_delimiter_', '_classify_']
lowerCAmelCase__ : Dict = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE_ ) )
model.to(SCREAMING_SNAKE_CASE_ )
# Load and encode the datasets
def tokenize_and_encode(SCREAMING_SNAKE_CASE_ ):
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return obj
return [tokenize_and_encode(SCREAMING_SNAKE_CASE_ ) for o in obj]
logger.info('Encoding dataset...' )
lowerCAmelCase__ : List[Any] = load_rocstories_dataset(args.train_dataset )
lowerCAmelCase__ : str = load_rocstories_dataset(args.eval_dataset )
lowerCAmelCase__ : Union[str, Any] = (train_dataset, eval_dataset)
lowerCAmelCase__ : List[str] = tokenize_and_encode(SCREAMING_SNAKE_CASE_ )
# Compute the max input length for the Transformer
lowerCAmelCase__ : Union[str, Any] = model.config.n_positions // 2 - 2
    lowerCAmelCase__ : Tuple = max(
        len(story[:max_length] ) + max(len(conta[:max_length] ) , len(contb[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, conta, contb, _ in dataset )
lowerCAmelCase__ : Dict = min(SCREAMING_SNAKE_CASE_ , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
lowerCAmelCase__ : int = pre_process_datasets(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ )
    lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = tensor_datasets[0], tensor_datasets[1]
lowerCAmelCase__ : str = TensorDataset(*SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Tuple = RandomSampler(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Tuple = DataLoader(SCREAMING_SNAKE_CASE_ , sampler=SCREAMING_SNAKE_CASE_ , batch_size=args.train_batch_size )
lowerCAmelCase__ : Optional[Any] = TensorDataset(*SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Dict = SequentialSampler(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = DataLoader(SCREAMING_SNAKE_CASE_ , sampler=SCREAMING_SNAKE_CASE_ , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
lowerCAmelCase__ : Union[str, Any] = args.max_steps
lowerCAmelCase__ : int = args.max_steps // (len(SCREAMING_SNAKE_CASE_ ) // args.gradient_accumulation_steps) + 1
else:
lowerCAmelCase__ : Optional[Any] = len(SCREAMING_SNAKE_CASE_ ) // args.gradient_accumulation_steps * args.num_train_epochs
lowerCAmelCase__ : Optional[int] = list(model.named_parameters() )
lowerCAmelCase__ : Tuple = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
lowerCAmelCase__ : str = [
{
'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'weight_decay': args.weight_decay,
},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
]
lowerCAmelCase__ : Union[str, Any] = AdamW(SCREAMING_SNAKE_CASE_ , lr=args.learning_rate , eps=args.adam_epsilon )
lowerCAmelCase__ : int = get_linear_schedule_with_warmup(
SCREAMING_SNAKE_CASE_ , num_warmup_steps=args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE_ )
if args.do_train:
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ):
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : int = 0
lowerCAmelCase__ : str = tqdm(SCREAMING_SNAKE_CASE_ , desc='Training' )
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : Union[str, Any] = tuple(t.to(SCREAMING_SNAKE_CASE_ ) for t in batch )
lowerCAmelCase__ : Any = batch
lowerCAmelCase__ : Tuple = model(SCREAMING_SNAKE_CASE_ , mc_token_ids=SCREAMING_SNAKE_CASE_ , lm_labels=SCREAMING_SNAKE_CASE_ , mc_labels=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : str = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
lowerCAmelCase__ : Optional[int] = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
lowerCAmelCase__ : List[str] = 'Training loss: {:.2e} lr: {:.2e}'.format(SCREAMING_SNAKE_CASE_ , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
lowerCAmelCase__ : Optional[int] = model.module if hasattr(SCREAMING_SNAKE_CASE_ , 'module' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
lowerCAmelCase__ : List[str] = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : List[str] = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE_ )
torch.save(model_to_save.state_dict() , SCREAMING_SNAKE_CASE_ )
model_to_save.config.to_json_file(SCREAMING_SNAKE_CASE_ )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
lowerCAmelCase__ : Dict = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
lowerCAmelCase__ : List[Any] = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(SCREAMING_SNAKE_CASE_ )
if args.do_eval:
model.eval()
        lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = 0, 0
        lowerCAmelCase__ , lowerCAmelCase__ : Any = 0, 0
for batch in tqdm(SCREAMING_SNAKE_CASE_ , desc='Evaluating' ):
lowerCAmelCase__ : str = tuple(t.to(SCREAMING_SNAKE_CASE_ ) for t in batch )
lowerCAmelCase__ : Tuple = batch
with torch.no_grad():
lowerCAmelCase__ : List[str] = model(
SCREAMING_SNAKE_CASE_ , mc_token_ids=SCREAMING_SNAKE_CASE_ , lm_labels=SCREAMING_SNAKE_CASE_ , mc_labels=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = mc_logits.detach().cpu().numpy()
lowerCAmelCase__ : List[Any] = mc_labels.to('cpu' ).numpy()
lowerCAmelCase__ : str = accuracy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
lowerCAmelCase__ : Optional[int] = eval_loss / nb_eval_steps
lowerCAmelCase__ : Any = eval_accuracy / nb_eval_examples
lowerCAmelCase__ : Union[str, Any] = tr_loss / nb_tr_steps if args.do_train else None
lowerCAmelCase__ : Tuple = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
lowerCAmelCase__ : Dict = os.path.join(args.output_dir , 'eval_results.txt' )
with open(SCREAMING_SNAKE_CASE_ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , SCREAMING_SNAKE_CASE_ , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
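# Standalone sketch of the optimizer/scheduler pairing used in main() above:
# AdamW plus a linear warmup-then-decay schedule. The dummy model and step
# counts are invented; requires the transformers package.
import torch
from transformers import get_linear_schedule_with_warmup

model = torch.nn.Linear(10, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=6.25e-5, eps=1e-8)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=10, num_training_steps=100)
lrs = []
for _ in range(100):
    optimizer.step()  # normally preceded by loss.backward()
    scheduler.step()
    lrs.append(scheduler.get_last_lr()[0])
# The learning rate peaks at the configured value once warmup ends.
assert max(lrs) <= 6.25e-5 and lrs.index(max(lrs)) == 9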
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
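# Minimal sketch of the lazy-import pattern that _LazyModule implements,
# using PEP 562 module-level __getattr__ instead. This belongs in a package
# __init__.py (the relative import will not resolve in a standalone script);
# the module and attribute names are hypothetical.
import importlib

_lazy_import_structure = {'heavy_module': ['HeavyClass']}
_attr_to_module = {attr: mod for mod, attrs in _lazy_import_structure.items() for attr in attrs}

def __getattr__(name):
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(f'module {__name__!r} has no attribute {name!r}')
    module = importlib.import_module(f'.{module_name}', __name__)
    return getattr(module, name)  # e.g. lazy_pkg.HeavyClass triggers the real import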
from collections.abc import Sequence
from queue import Queue
class A__ :
def __init__( self : List[str] , a : Dict , a : str , a : List[Any] , a : Union[str, Any]=None , a : Optional[Any]=None ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = start
lowerCAmelCase__ : Tuple = end
lowerCAmelCase__ : Optional[int] = val
lowerCAmelCase__ : List[Any] = (start + end) // 2
lowerCAmelCase__ : Dict = left
lowerCAmelCase__ : int = right
def __repr__( self : Optional[int] ):
'''simple docstring'''
return f'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'''
class A__ :
def __init__( self : List[str] , a : Sequence , a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = collection
lowerCAmelCase__ : str = function
if self.collection:
lowerCAmelCase__ : Optional[Any] = self._build_tree(0 , len(a ) - 1 )
def _lowerCamelCase ( self : int , a : Union[str, Any] , a : str ):
'''simple docstring'''
self._update_tree(self.root , a , a )
def _lowerCamelCase ( self : int , a : Union[str, Any] , a : List[str] ):
'''simple docstring'''
return self._query_range(self.root , a , a )
def _lowerCamelCase ( self : List[Any] , a : List[str] , a : List[str] ):
'''simple docstring'''
if start == end:
return SegmentTreeNode(a , a , self.collection[start] )
lowerCAmelCase__ : int = (start + end) // 2
lowerCAmelCase__ : str = self._build_tree(a , a )
lowerCAmelCase__ : Optional[Any] = self._build_tree(mid + 1 , a )
return SegmentTreeNode(a , a , self.fn(left.val , right.val ) , a , a )
def _lowerCamelCase ( self : List[Any] , a : List[str] , a : Optional[Any] , a : Optional[Any] ):
'''simple docstring'''
if node.start == i and node.end == i:
lowerCAmelCase__ : int = val
return
if i <= node.mid:
self._update_tree(node.left , a , a )
else:
self._update_tree(node.right , a , a )
lowerCAmelCase__ : List[Any] = self.fn(node.left.val , node.right.val )
def _lowerCamelCase ( self : Union[str, Any] , a : int , a : Optional[Any] , a : Optional[Any] ):
'''simple docstring'''
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , a , a )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , a , node.mid ) , self._query_range(node.right , node.mid + 1 , a ) , )
else:
# range in right child tree
return self._query_range(node.right , a , a )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
if self.root is not None:
lowerCAmelCase__ : int = Queue()
queue.put(self.root )
while not queue.empty():
lowerCAmelCase__ : Tuple = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("""*""" * 50)
lowerCamelCase__ = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
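# Cross-check sketch: an independent, array-based (iterative) segment tree
# for inclusive range queries, verified against brute force. Included only
# for comparison with the pointer-based tree above; all names here are this
# sketch's own, and the combine order is safe for commutative functions such
# as the add/max/min used in the demo.
import operator

class ArraySegmentTree:
    def __init__(self, values, fn):
        self.n = len(values)
        self.fn = fn
        self.tree = [0] * self.n + list(values)  # leaves live in tree[n:]
        for i in range(self.n - 1, 0, -1):
            self.tree[i] = fn(self.tree[2 * i], self.tree[2 * i + 1])

    def query(self, lo, hi):  # inclusive range [lo, hi]
        lo += self.n
        hi += self.n + 1
        result = None
        while lo < hi:
            if lo & 1:
                result = self.tree[lo] if result is None else self.fn(result, self.tree[lo])
                lo += 1
            if hi & 1:
                hi -= 1
                result = self.tree[hi] if result is None else self.fn(result, self.tree[hi])
            lo >>= 1
            hi >>= 1
        return result

values = [2, 1, 5, 3, 4]
tree = ArraySegmentTree(values, operator.add)
for lo in range(len(values)):
    for hi in range(lo, len(values)):
        assert tree.query(lo, hi) == sum(values[lo : hi + 1])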
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0.999 , SCREAMING_SNAKE_CASE_="cosine" , ) -> Union[str, Any]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(SCREAMING_SNAKE_CASE_ ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(SCREAMING_SNAKE_CASE_ ):
return math.exp(t * -12.0 )
else:
        raise ValueError(F'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
lowerCAmelCase__ : Tuple = []
for i in range(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : List[Any] = i / num_diffusion_timesteps
lowerCAmelCase__ : str = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(SCREAMING_SNAKE_CASE_ ) / alpha_bar_fn(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) )
return torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )
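# Self-contained illustration of the cosine beta schedule the function above
# computes: beta_i = 1 - alpha_bar((i+1)/N) / alpha_bar(i/N), clipped at
# max_beta. This mirrors the "cosine" branch only.
import math
import torch

def cosine_betas(num_steps: int, max_beta: float = 0.999) -> torch.Tensor:
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    betas = [
        min(1 - alpha_bar((i + 1) / num_steps) / alpha_bar(i / num_steps), max_beta)
        for i in range(num_steps)
    ]
    return torch.tensor(betas, dtype=torch.float32)

b = cosine_betas(1_000)
print(b[0].item(), b[-1].item())  # betas grow monotonically towards the end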
class A__ ( __magic_name__ , __magic_name__ ):
lowercase = [e.name for e in KarrasDiffusionSchedulers]
lowercase = 2
@register_to_config
def __init__( self : Union[str, Any] , a : int = 1_000 , a : float = 0.0_0_0_8_5 , a : float = 0.0_1_2 , a : str = "linear" , a : Optional[Union[np.ndarray, List[float]]] = None , a : str = "epsilon" , a : Optional[bool] = False , a : Optional[bool] = False , a : float = 1.0 , a : str = "linspace" , a : int = 0 , ):
'''simple docstring'''
if trained_betas is not None:
lowerCAmelCase__ : List[str] = torch.tensor(a , dtype=torch.floataa )
elif beta_schedule == "linear":
lowerCAmelCase__ : List[str] = torch.linspace(a , a , a , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowerCAmelCase__ : Union[str, Any] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , a , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowerCAmelCase__ : int = betas_for_alpha_bar(a , alpha_transform_type='cosine' )
elif beta_schedule == "exp":
lowerCAmelCase__ : List[str] = betas_for_alpha_bar(a , alpha_transform_type='exp' )
else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
lowerCAmelCase__ : int = 1.0 - self.betas
lowerCAmelCase__ : Tuple = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(a , a , a )
lowerCAmelCase__ : Optional[Any] = use_karras_sigmas
def _lowerCamelCase ( self : str , a : List[Any] , a : str=None ):
'''simple docstring'''
if schedule_timesteps is None:
lowerCAmelCase__ : List[str] = self.timesteps
lowerCAmelCase__ : int = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
lowerCAmelCase__ : List[str] = 1 if len(a ) > 1 else 0
else:
lowerCAmelCase__ : Tuple = timestep.cpu().item() if torch.is_tensor(a ) else timestep
lowerCAmelCase__ : Tuple = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _lowerCamelCase ( self : Tuple , a : torch.FloatTensor , a : Union[float, torch.FloatTensor] , ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.index_for_timestep(a )
lowerCAmelCase__ : Any = self.sigmas[step_index]
lowerCAmelCase__ : Optional[Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _lowerCamelCase ( self : List[str] , a : int , a : Union[str, torch.device] = None , a : Optional[int] = None , ):
'''simple docstring'''
lowerCAmelCase__ : Any = num_inference_steps
lowerCAmelCase__ : Union[str, Any] = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
lowerCAmelCase__ : Union[str, Any] = np.linspace(0 , num_train_timesteps - 1 , a , dtype=a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
lowerCAmelCase__ : List[Any] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowerCAmelCase__ : Dict = (np.arange(0 , a ) * step_ratio).round()[::-1].copy().astype(a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
lowerCAmelCase__ : Tuple = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowerCAmelCase__ : int = (np.arange(a , 0 , -step_ratio )).round().copy().astype(a )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
lowerCAmelCase__ : str = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
lowerCAmelCase__ : List[Any] = np.log(a )
lowerCAmelCase__ : Optional[int] = np.interp(a , np.arange(0 , len(a ) ) , a )
if self.config.use_karras_sigmas:
lowerCAmelCase__ : str = self._convert_to_karras(in_sigmas=a , num_inference_steps=self.num_inference_steps )
lowerCAmelCase__ : Union[str, Any] = np.array([self._sigma_to_t(a , a ) for sigma in sigmas] )
lowerCAmelCase__ : Tuple = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
lowerCAmelCase__ : Dict = torch.from_numpy(a ).to(device=a )
lowerCAmelCase__ : Union[str, Any] = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
lowerCAmelCase__ : Tuple = torch.from_numpy(a )
lowerCAmelCase__ : List[str] = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(a ).startswith('mps' ):
# mps does not support float64
lowerCAmelCase__ : Optional[Any] = timesteps.to(a , dtype=torch.float32 )
else:
lowerCAmelCase__ : Any = timesteps.to(device=a )
# empty dt and derivative
lowerCAmelCase__ : str = None
lowerCAmelCase__ : Optional[int] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
lowerCAmelCase__ : Optional[Any] = defaultdict(a )
def _lowerCamelCase ( self : Any , a : Dict , a : Optional[Any] ):
'''simple docstring'''
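# inverts the sigma schedule: returns the fractional training timestep whose sigma
# matches the given value, via piecewise-linear interpolation in log-sigma space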
lowerCAmelCase__ : Optional[Any] = np.log(a )
# get distribution
lowerCAmelCase__ : Tuple = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
lowerCAmelCase__ : Optional[int] = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
lowerCAmelCase__ : List[str] = low_idx + 1
lowerCAmelCase__ : List[str] = log_sigmas[low_idx]
lowerCAmelCase__ : Any = log_sigmas[high_idx]
# interpolate sigmas
lowerCAmelCase__ : Union[str, Any] = (low - log_sigma) / (low - high)
lowerCAmelCase__ : List[Any] = np.clip(a , 0 , 1 )
# transform interpolation to time range
lowerCAmelCase__ : List[Any] = (1 - w) * low_idx + w * high_idx
lowerCAmelCase__ : Any = t.reshape(sigma.shape )
return t
def _lowerCamelCase ( self : Tuple , a : torch.FloatTensor , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : float = in_sigmas[-1].item()
lowerCAmelCase__ : float = in_sigmas[0].item()
lowerCAmelCase__ : Tuple = 7.0 # 7.0 is the value used in the paper
lowerCAmelCase__ : Tuple = np.linspace(0 , 1 , a )
lowerCAmelCase__ : Any = sigma_min ** (1 / rho)
lowerCAmelCase__ : Optional[Any] = sigma_max ** (1 / rho)
lowerCAmelCase__ : Tuple = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
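# A minimal, self-contained sketch of the Karras et al. (2022) schedule computed by the
# method above; sigma_min/sigma_max are assumed example endpoints, not values from this file:
#
#   import numpy as np
#   rho, n = 7.0, 10
#   sigma_min, sigma_max = 0.03, 14.6
#   ramp = np.linspace(0, 1, n)
#   sigmas = (sigma_max ** (1 / rho) + ramp * (sigma_min ** (1 / rho) - sigma_max ** (1 / rho))) ** rho
#   # sigmas[0] == sigma_max, sigmas[-1] == sigma_min, spaced densely near sigma_min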
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return self.dt is None
def _lowerCamelCase ( self : List[str] , a : Union[torch.FloatTensor, np.ndarray] , a : Union[float, torch.FloatTensor] , a : Union[torch.FloatTensor, np.ndarray] , a : bool = True , ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.index_for_timestep(a )
# advance index counter by 1
lowerCAmelCase__ : Tuple = timestep.cpu().item() if torch.is_tensor(a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
lowerCAmelCase__ : Union[str, Any] = self.sigmas[step_index]
lowerCAmelCase__ : Union[str, Any] = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
lowerCAmelCase__ : int = self.sigmas[step_index - 1]
lowerCAmelCase__ : Any = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
lowerCAmelCase__ : Optional[int] = 0
lowerCAmelCase__ : Union[str, Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
lowerCAmelCase__ : int = sigma_hat if self.state_in_first_order else sigma_next
lowerCAmelCase__ : Any = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
lowerCAmelCase__ : Dict = sigma_hat if self.state_in_first_order else sigma_next
lowerCAmelCase__ : List[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
lowerCAmelCase__ : int = model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`''' )
if self.config.clip_sample:
lowerCAmelCase__ : str = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
lowerCAmelCase__ : Dict = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
lowerCAmelCase__ : Optional[int] = sigma_next - sigma_hat
# store for 2nd order step
lowerCAmelCase__ : List[Any] = derivative
lowerCAmelCase__ : str = dt
lowerCAmelCase__ : Dict = sample
else:
# 2. 2nd order / Heun's method
lowerCAmelCase__ : Union[str, Any] = (sample - pred_original_sample) / sigma_next
lowerCAmelCase__ : Union[str, Any] = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
lowerCAmelCase__ : Dict = self.dt
lowerCAmelCase__ : Optional[int] = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
lowerCAmelCase__ : List[str] = None
lowerCAmelCase__ : Tuple = None
lowerCAmelCase__ : str = None
lowerCAmelCase__ : Tuple = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=a )
def _lowerCamelCase ( self : int , a : torch.FloatTensor , a : torch.FloatTensor , a : torch.FloatTensor , ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(a ):
# mps does not support float64
lowerCAmelCase__ : Optional[int] = self.timesteps.to(original_samples.device , dtype=torch.float32 )
lowerCAmelCase__ : int = timesteps.to(original_samples.device , dtype=torch.float32 )
else:
lowerCAmelCase__ : Union[str, Any] = self.timesteps.to(original_samples.device )
lowerCAmelCase__ : Optional[Any] = timesteps.to(original_samples.device )
lowerCAmelCase__ : List[Any] = [self.index_for_timestep(a , a ) for t in timesteps]
lowerCAmelCase__ : List[str] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
lowerCAmelCase__ : Any = sigma.unsqueeze(-1 )
lowerCAmelCase__ : List[str] = original_samples + noise * sigma
return noisy_samples
def __len__( self : int ):
'''simple docstring'''
return self.config.num_train_timesteps
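# Hedged usage sketch for the scheduler above (a Heun-style discrete scheduler); the
# `unet` call and tensor shape are illustrative assumptions, not part of this file:
#
#   scheduler.set_timesteps(num_inference_steps=25, device="cpu")
#   sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = unet(model_input, t)  # hypothetical denoising model
#       sample = scheduler.step(noise_pred, t, sample).prev_sample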
| 307
| 0
|
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class A__ ( TensorFormatter[Mapping, 'torch.Tensor', Mapping] ):
def __init__( self : List[str] , a : Union[str, Any]=None , **a : Union[str, Any] ):
'''simple docstring'''
super().__init__(features=a )
lowerCAmelCase__ : Any = torch_tensor_kwargs
import torch # noqa import torch at initialization
def _lowerCamelCase ( self : Dict , a : Tuple ):
'''simple docstring'''
import torch
if isinstance(a , a ) and column:
if all(
isinstance(a , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(a )
return column
def _lowerCamelCase ( self : Tuple , a : Tuple ):
'''simple docstring'''
import torch
if isinstance(a , (str, bytes, type(None )) ):
return value
elif isinstance(a , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
lowerCAmelCase__ : List[str] = {}
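# map numpy integer dtypes to torch.int64 and floating dtypes to torch.float32 by
# default; any user-supplied torch_tensor_kwargs override these defaults below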
if isinstance(a , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
lowerCAmelCase__ : List[str] = {'dtype': torch.int64}
elif isinstance(a , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
lowerCAmelCase__ : Optional[int] = {'dtype': torch.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(a , PIL.Image.Image ):
lowerCAmelCase__ : Dict = np.asarray(a )
return torch.tensor(a , **{**default_dtype, **self.torch_tensor_kwargs} )
def _lowerCamelCase ( self : List[str] , a : Optional[int] ):
'''simple docstring'''
import torch
# support for torch, tf, jax etc.
if hasattr(a , '__array__' ) and not isinstance(a , torch.Tensor ):
lowerCAmelCase__ : int = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(a , np.ndarray ):
if data_struct.dtype == object: # torch tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
elif isinstance(a , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
return self._tensorize(a )
def _lowerCamelCase ( self : str , a : dict ):
'''simple docstring'''
return map_nested(self._recursive_tensorize , a , map_list=a )
def _lowerCamelCase ( self : str , a : pa.Table ):
'''simple docstring'''
lowerCAmelCase__ : int = self.numpy_arrow_extractor().extract_row(a )
lowerCAmelCase__ : Tuple = self.python_features_decoder.decode_row(a )
return self.recursive_tensorize(a )
def _lowerCamelCase ( self : Optional[Any] , a : pa.Table ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = self.numpy_arrow_extractor().extract_column(a )
lowerCAmelCase__ : Tuple = self.python_features_decoder.decode_column(a , pa_table.column_names[0] )
lowerCAmelCase__ : Optional[Any] = self.recursive_tensorize(a )
lowerCAmelCase__ : int = self._consolidate(a )
return column
def _lowerCamelCase ( self : Dict , a : pa.Table ):
'''simple docstring'''
lowerCAmelCase__ : int = self.numpy_arrow_extractor().extract_batch(a )
lowerCAmelCase__ : List[Any] = self.python_features_decoder.decode_batch(a )
lowerCAmelCase__ : Optional[int] = self.recursive_tensorize(a )
for column_name in batch:
lowerCAmelCase__ : Union[str, Any] = self._consolidate(batch[column_name] )
return batch
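# Hedged usage sketch (dataset id is an assumption): inside the `datasets` library this
# formatter is what backs `set_format("torch")`, e.g.
#
#   ds = load_dataset("user/some-dataset", split="train")
#   ds.set_format("torch")  # rows, columns and batches now come back as torch.Tensor
#   batch = ds[:8]          # dict of tensors, stacked when shapes and dtypes agree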
| 355
|
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
if isinstance(SCREAMING_SNAKE_CASE_ , collections.abc.Iterable ):
return x
return (x, x)
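# e.g. to_atuple(224) -> (224, 224), while an iterable such as (224, 224) is returned unchanged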
@require_tf
class A__ :
def _lowerCamelCase ( self : List[Any] , a : List[str] , a : Optional[Any] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Dict , a : int , a : str , a : List[Any] , a : Dict , a : List[str]=None , **a : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(a , a )
lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel(a )
lowerCAmelCase__ : Tuple = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) )
def _lowerCamelCase ( self : Union[str, Any] , a : Dict , a : Tuple , a : Dict , a : Union[str, Any] , a : List[Any]=None , **a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.get_vision_text_model(a , a )
lowerCAmelCase__ : List[Any] = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : Optional[int] = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _lowerCamelCase ( self : List[str] , a : Optional[int] , a : Optional[int] , a : Union[str, Any] , a : List[Any] , a : Any=None , **a : Dict ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Optional[Any] = {'vision_model': vision_model, 'text_model': text_model}
lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**a )
lowerCAmelCase__ : Union[str, Any] = model(input_ids=a , pixel_values=a , attention_mask=a )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _lowerCamelCase ( self : Any , a : Optional[int] , a : Optional[int] , a : Dict , a : Optional[int] , a : Optional[int]=None , **a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : int = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Dict = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : List[str] = model(input_ids=a , pixel_values=a , attention_mask=a )
lowerCAmelCase__ : Union[str, Any] = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a )
lowerCAmelCase__ : Any = TFVisionTextDualEncoderModel.from_pretrained(a )
lowerCAmelCase__ : int = model(input_ids=a , pixel_values=a , attention_mask=a )
lowerCAmelCase__ : Union[str, Any] = after_output[0].numpy()
lowerCAmelCase__ : Optional[Any] = np.amax(np.abs(out_a - out_b ) )
self.assertLessEqual(a , 1E-5 )
def _lowerCamelCase ( self : List[str] , a : Dict , a : Optional[int] , a : List[Any] , a : str , a : int=None , **a : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Any = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : str = model(
input_ids=a , pixel_values=a , attention_mask=a , output_attentions=a )
lowerCAmelCase__ : Union[str, Any] = output.vision_model_output.attentions
self.assertEqual(len(a ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase__ : Optional[int] = to_atuple(vision_model.config.image_size )
lowerCAmelCase__ : Optional[Any] = to_atuple(vision_model.config.patch_size )
lowerCAmelCase__ : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowerCAmelCase__ : int = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowerCAmelCase__ : str = output.text_model_output.attentions
self.assertEqual(len(a ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _lowerCamelCase ( self : List[Any] , a : np.ndarray , a : np.ndarray , a : float ):
'''simple docstring'''
lowerCAmelCase__ : int = np.abs((a - b) ).max()
self.assertLessEqual(a , a , f'''Difference between the two model outputs is {diff} (>= {tol}).''' )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : str = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
self.check_save_load(**a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**a )
@slow
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.get_pretrained_model_and_inputs()
lowerCAmelCase__ : List[Any] = model_a(**a )
lowerCAmelCase__ : Optional[int] = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(a )
lowerCAmelCase__ : str = TFVisionTextDualEncoderModel.from_pretrained(a )
lowerCAmelCase__ : List[str] = model_a(**a )
lowerCAmelCase__ : int = after_outputs[0].numpy()
lowerCAmelCase__ : List[Any] = np.amax(np.abs(out_a - out_b ) )
self.assertLessEqual(a , 1E-5 )
@require_tf
class A__ ( __magic_name__ , unittest.TestCase ):
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-random-bert' )
lowerCAmelCase__ : int = 13
lowerCAmelCase__ : List[Any] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowerCAmelCase__ : int = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
lowerCAmelCase__ : Optional[Any] = random_attention_mask([batch_size, 4] )
lowerCAmelCase__ : List[Any] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _lowerCamelCase ( self : List[Any] , a : Dict , a : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = TFViTModel(a , name='vision_model' )
lowerCAmelCase__ : str = TFBertModel(a , name='text_model' )
return vision_model, text_model
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = TFViTModelTester(self )
lowerCAmelCase__ : Tuple = TFBertModelTester(self )
lowerCAmelCase__ : Optional[int] = vit_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : Union[str, Any] = bert_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : int = vision_config_and_inputs
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class A__ ( __magic_name__ , unittest.TestCase ):
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'Rocketknight1/tiny-random-deit-tf' , 'hf-internal-testing/tiny-random-roberta' )
lowerCAmelCase__ : Tuple = 13
lowerCAmelCase__ : Any = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowerCAmelCase__ : Dict = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
lowerCAmelCase__ : Any = random_attention_mask([batch_size, 4] )
lowerCAmelCase__ : Tuple = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _lowerCamelCase ( self : str , a : Optional[Any] , a : Dict , a : Dict , a : Any , a : Any=None , **a : int ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a )
lowerCAmelCase__ : Optional[int] = TFVisionTextDualEncoderModel(vision_model=a , text_model=a )
lowerCAmelCase__ : Any = model(
input_ids=a , pixel_values=a , attention_mask=a , output_attentions=a )
lowerCAmelCase__ : Union[str, Any] = output.vision_model_output.attentions
self.assertEqual(len(a ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowerCAmelCase__ : str = to_atuple(vision_model.config.image_size )
lowerCAmelCase__ : Union[str, Any] = to_atuple(vision_model.config.patch_size )
lowerCAmelCase__ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowerCAmelCase__ : int = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowerCAmelCase__ : List[str] = output.text_model_output.attentions
self.assertEqual(len(a ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _lowerCamelCase ( self : int , a : Optional[int] , a : int ):
'''simple docstring'''
lowerCAmelCase__ : Dict = TFDeiTModel(a , name='vision_model' )
lowerCAmelCase__ : List[Any] = TFRobertaModel(a , name='text_model' )
return vision_model, text_model
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = TFDeiTModelTester(self )
lowerCAmelCase__ : List[str] = TFRobertaModelTester(self )
lowerCAmelCase__ : str = vit_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : List[Any] = bert_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = vision_config_and_inputs
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class A__ ( __magic_name__ , unittest.TestCase ):
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : int = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'Rocketknight1/tiny-random-clip-tf' , 'hf-internal-testing/tiny-random-bert' )
lowerCAmelCase__ : Dict = 13
lowerCAmelCase__ : str = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowerCAmelCase__ : List[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
lowerCAmelCase__ : Union[str, Any] = random_attention_mask([batch_size, 4] )
lowerCAmelCase__ : Optional[int] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _lowerCamelCase ( self : str , a : int , a : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = TFCLIPVisionModel(a , name='vision_model' )
lowerCAmelCase__ : List[str] = TFBertModel(a , name='text_model' )
return vision_model, text_model
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = TFCLIPVisionModelTester(self )
lowerCAmelCase__ : Union[str, Any] = TFBertModelTester(self )
lowerCAmelCase__ : Any = clip_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : Any = bert_model_tester.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = vision_config_and_inputs
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class A__ ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel.from_pretrained(
'clip-italian/clip-italian' , logit_scale_init_value=1.0 , from_pt=a )
lowerCAmelCase__ : List[Any] = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' )
lowerCAmelCase__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowerCAmelCase__ : Any = processor(
text=['una foto di un gatto', 'una foto di un cane'] , images=a , padding=a , return_tensors='np' )
lowerCAmelCase__ : Union[str, Any] = model(**a )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
lowerCAmelCase__ : List[str] = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , a , atol=1E-3 ) )
| 307
| 0
|
import math
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> bool:
lowerCAmelCase__ : List[str] = math.log2(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(exponent )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = 1 / 12_345 ) -> int:
lowerCAmelCase__ : int = 0
lowerCAmelCase__ : Tuple = 0
lowerCAmelCase__ : int = 3
while True:
lowerCAmelCase__ : Union[str, Any] = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(partition_candidate ):
lowerCAmelCase__ : List[Any] = int(partition_candidate )
total_partitions += 1
if check_partition_perfect(partition_candidate ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(integer )
integer += 1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 356
|
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
lowerCamelCase__ = logging.getLogger(__name__)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
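# returns the number of correct predictions rather than a ratio; the evaluation loop
# below divides the running total by the number of examples to get accuracy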
lowerCAmelCase__ : Dict = np.argmax(SCREAMING_SNAKE_CASE_ , axis=1 )
return np.sum(outputs == labels )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
with open(SCREAMING_SNAKE_CASE_ , encoding='utf_8' ) as f:
lowerCAmelCase__ : Dict = csv.reader(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Dict = []
next(SCREAMING_SNAKE_CASE_ ) # skip the first line
for line in tqdm(SCREAMING_SNAKE_CASE_ ):
output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
lowerCAmelCase__ : Dict = []
for dataset in encoded_datasets:
lowerCAmelCase__ : List[str] = len(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = np.zeros((n_batch, 2, input_len) , dtype=np.int64 )
lowerCAmelCase__ : Tuple = np.zeros((n_batch, 2) , dtype=np.int64 )
lowerCAmelCase__ : List[Any] = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.int64 )
lowerCAmelCase__ : Tuple = np.zeros((n_batch,) , dtype=np.int64 )
for i, (story, conta, contb, mc_label) in enumerate(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : Tuple = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
lowerCAmelCase__ : Any = [start_token] + story[:cap_length] + [delimiter_token] + contb[:cap_length] + [clf_token]
lowerCAmelCase__ : Optional[Any] = with_conta
lowerCAmelCase__ : List[str] = with_contb
lowerCAmelCase__ : List[Any] = len(with_conta ) - 1
lowerCAmelCase__ : Tuple = len(with_contb ) - 1
lowerCAmelCase__ : Tuple = with_conta
lowerCAmelCase__ : Optional[int] = with_contb
lowerCAmelCase__ : Optional[int] = mc_label
lowerCAmelCase__ : Dict = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(SCREAMING_SNAKE_CASE_ ) for t in all_inputs ) )
return tensor_datasets
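# Shape note for the tensors built above: input_ids and lm_labels are
# (n_batch, 2, input_len), mc_token_ids is (n_batch, 2) and mc_labels is (n_batch,),
# i.e. two candidate continuations per story for the double-heads model.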
def lowerCAmelCase__ ( ) -> int:
lowerCAmelCase__ : int = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=SCREAMING_SNAKE_CASE_ , default='openai-gpt' , help='pretrained model name' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' )
parser.add_argument(
'--output_dir' , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument('--train_dataset' , type=SCREAMING_SNAKE_CASE_ , default='' )
parser.add_argument('--eval_dataset' , type=SCREAMING_SNAKE_CASE_ , default='' )
parser.add_argument('--seed' , type=SCREAMING_SNAKE_CASE_ , default=42 )
parser.add_argument('--num_train_epochs' , type=SCREAMING_SNAKE_CASE_ , default=3 )
parser.add_argument('--train_batch_size' , type=SCREAMING_SNAKE_CASE_ , default=8 )
parser.add_argument('--eval_batch_size' , type=SCREAMING_SNAKE_CASE_ , default=16 )
parser.add_argument('--adam_epsilon' , default=1e-8 , type=SCREAMING_SNAKE_CASE_ , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , type=SCREAMING_SNAKE_CASE_ , default=1 )
parser.add_argument(
'--max_steps' , default=-1 , type=SCREAMING_SNAKE_CASE_ , help=(
'If > 0: set total number of training steps to perform. Override num_train_epochs.'
) , )
parser.add_argument(
'--gradient_accumulation_steps' , type=SCREAMING_SNAKE_CASE_ , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--learning_rate' , type=SCREAMING_SNAKE_CASE_ , default=6.25e-5 )
parser.add_argument('--warmup_steps' , default=0 , type=SCREAMING_SNAKE_CASE_ , help='Linear warmup over warmup_steps.' )
parser.add_argument('--lr_schedule' , type=SCREAMING_SNAKE_CASE_ , default='warmup_linear' )
parser.add_argument('--weight_decay' , type=SCREAMING_SNAKE_CASE_ , default=0.01 )
parser.add_argument('--lm_coef' , type=SCREAMING_SNAKE_CASE_ , default=0.9 )
parser.add_argument('--n_valid' , type=SCREAMING_SNAKE_CASE_ , default=374 )
parser.add_argument('--server_ip' , type=SCREAMING_SNAKE_CASE_ , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=SCREAMING_SNAKE_CASE_ , default='' , help='Can be used for distant debugging.' )
lowerCAmelCase__ : List[str] = parser.parse_args()
print(SCREAMING_SNAKE_CASE_ )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE_ )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
lowerCAmelCase__ : str = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
lowerCAmelCase__ : Dict = torch.cuda.device_count()
logger.info('device: {}, n_gpu {}'.format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# These loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
lowerCAmelCase__ : Union[str, Any] = ['_start_', '_delimiter_', '_classify_']
lowerCAmelCase__ : Dict = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE_ ) )
model.to(SCREAMING_SNAKE_CASE_ )
# Load and encode the datasets
def tokenize_and_encode(SCREAMING_SNAKE_CASE_ ):
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return obj
return [tokenize_and_encode(SCREAMING_SNAKE_CASE_ ) for o in obj]
logger.info('Encoding dataset...' )
lowerCAmelCase__ : List[Any] = load_rocstories_dataset(args.train_dataset )
lowerCAmelCase__ : str = load_rocstories_dataset(args.eval_dataset )
lowerCAmelCase__ : Union[str, Any] = (train_dataset, eval_dataset)
lowerCAmelCase__ : List[str] = tokenize_and_encode(SCREAMING_SNAKE_CASE_ )
# Compute the max input length for the Transformer
lowerCAmelCase__ : Union[str, Any] = model.config.n_positions // 2 - 2
lowerCAmelCase__ : Tuple = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(contb[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, contb, _ in dataset )
lowerCAmelCase__ : Dict = min(SCREAMING_SNAKE_CASE_ , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
lowerCAmelCase__ : int = pre_process_datasets(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = tensor_datasets[0], tensor_datasets[1]
lowerCAmelCase__ : str = TensorDataset(*SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Tuple = RandomSampler(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Tuple = DataLoader(SCREAMING_SNAKE_CASE_ , sampler=SCREAMING_SNAKE_CASE_ , batch_size=args.train_batch_size )
lowerCAmelCase__ : Optional[Any] = TensorDataset(*SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Dict = SequentialSampler(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = DataLoader(SCREAMING_SNAKE_CASE_ , sampler=SCREAMING_SNAKE_CASE_ , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
lowerCAmelCase__ : Union[str, Any] = args.max_steps
lowerCAmelCase__ : int = args.max_steps // (len(SCREAMING_SNAKE_CASE_ ) // args.gradient_accumulation_steps) + 1
else:
lowerCAmelCase__ : Optional[Any] = len(SCREAMING_SNAKE_CASE_ ) // args.gradient_accumulation_steps * args.num_train_epochs
lowerCAmelCase__ : Optional[int] = list(model.named_parameters() )
lowerCAmelCase__ : Tuple = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
lowerCAmelCase__ : str = [
{
'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'weight_decay': args.weight_decay,
},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
]
lowerCAmelCase__ : Union[str, Any] = AdamW(SCREAMING_SNAKE_CASE_ , lr=args.learning_rate , eps=args.adam_epsilon )
lowerCAmelCase__ : int = get_linear_schedule_with_warmup(
SCREAMING_SNAKE_CASE_ , num_warmup_steps=args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE_ )
if args.do_train:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ):
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : int = 0
lowerCAmelCase__ : str = tqdm(SCREAMING_SNAKE_CASE_ , desc='Training' )
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : Union[str, Any] = tuple(t.to(SCREAMING_SNAKE_CASE_ ) for t in batch )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = batch
lowerCAmelCase__ : Tuple = model(SCREAMING_SNAKE_CASE_ , mc_token_ids=SCREAMING_SNAKE_CASE_ , lm_labels=SCREAMING_SNAKE_CASE_ , mc_labels=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : str = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
lowerCAmelCase__ : Optional[int] = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
lowerCAmelCase__ : List[str] = 'Training loss: {:.2e} lr: {:.2e}'.format(SCREAMING_SNAKE_CASE_ , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
lowerCAmelCase__ : Optional[int] = model.module if hasattr(SCREAMING_SNAKE_CASE_ , 'module' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
lowerCAmelCase__ : List[str] = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : List[str] = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE_ )
torch.save(model_to_save.state_dict() , SCREAMING_SNAKE_CASE_ )
model_to_save.config.to_json_file(SCREAMING_SNAKE_CASE_ )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
lowerCAmelCase__ : Dict = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
lowerCAmelCase__ : List[Any] = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(SCREAMING_SNAKE_CASE_ )
if args.do_eval:
model.eval()
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = 0, 0
lowerCAmelCase__ , lowerCAmelCase__ : Any = 0, 0
for batch in tqdm(SCREAMING_SNAKE_CASE_ , desc='Evaluating' ):
lowerCAmelCase__ : str = tuple(t.to(SCREAMING_SNAKE_CASE_ ) for t in batch )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Tuple = batch
with torch.no_grad():
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = model(
SCREAMING_SNAKE_CASE_ , mc_token_ids=SCREAMING_SNAKE_CASE_ , lm_labels=SCREAMING_SNAKE_CASE_ , mc_labels=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = mc_logits.detach().cpu().numpy()
lowerCAmelCase__ : List[Any] = mc_labels.to('cpu' ).numpy()
lowerCAmelCase__ : str = accuracy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
lowerCAmelCase__ : Optional[int] = eval_loss / nb_eval_steps
lowerCAmelCase__ : Any = eval_accuracy / nb_eval_examples
lowerCAmelCase__ : Union[str, Any] = tr_loss / nb_tr_steps if args.do_train else None
lowerCAmelCase__ : Tuple = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
lowerCAmelCase__ : Dict = os.path.join(args.output_dir , 'eval_results.txt' )
with open(SCREAMING_SNAKE_CASE_ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , SCREAMING_SNAKE_CASE_ , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 307
| 0
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class A__ :
def __init__( self : Any , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : Any = data
lowerCAmelCase__ : Node | None = None
class A__ :
def __init__( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Dict = None
lowerCAmelCase__ : Optional[Any] = None
def __iter__( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.head
while self.head:
yield node.data
lowerCAmelCase__ : Any = node.next
if node == self.head:
break
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self : str ):
'''simple docstring'''
return "->".join(str(a ) for item in iter(self ) )
def _lowerCamelCase ( self : List[str] , a : Any ):
'''simple docstring'''
self.insert_nth(len(self ) , a )
def _lowerCamelCase ( self : List[Any] , a : Any ):
'''simple docstring'''
self.insert_nth(0 , a )
def _lowerCamelCase ( self : str , a : int , a : Any ):
'''simple docstring'''
if index < 0 or index > len(self ):
raise IndexError('list index out of range.' )
lowerCAmelCase__ : int = Node(a )
if self.head is None:
lowerCAmelCase__ : Optional[int] = new_node # first node points to itself
lowerCAmelCase__ : Union[str, Any] = new_node
elif index == 0: # insert at head
lowerCAmelCase__ : Dict = self.head
lowerCAmelCase__ : str = new_node
else:
lowerCAmelCase__ : List[str] = self.head
for _ in range(index - 1 ):
lowerCAmelCase__ : List[str] = temp.next
lowerCAmelCase__ : Tuple = temp.next
lowerCAmelCase__ : Optional[Any] = new_node
if index == len(self ) - 1: # insert at tail
lowerCAmelCase__ : Optional[Any] = new_node
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return self.delete_nth(0 )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
return self.delete_nth(len(self ) - 1 )
def _lowerCamelCase ( self : Optional[Any] , a : int = 0 ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise IndexError('list index out of range.' )
lowerCAmelCase__ : str = self.head
if self.head == self.tail: # just one node
lowerCAmelCase__ : str = None
elif index == 0: # delete head node
lowerCAmelCase__ : int = self.tail.next.next
lowerCAmelCase__ : Tuple = self.head.next
else:
lowerCAmelCase__ : str = self.head
for _ in range(index - 1 ):
lowerCAmelCase__ : List[Any] = temp.next
lowerCAmelCase__ : List[str] = temp.next
lowerCAmelCase__ : Tuple = temp.next.next
if index == len(self ) - 1: # delete at tail
lowerCAmelCase__ : str = temp
return delete_node.data
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
return len(self ) == 0
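# note: len(self) walks the whole ring, so the bounds checks alone make every
# insert_nth/delete_nth call O(n) even when the node manipulation itself is O(1)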
def lowerCAmelCase__ ( ) -> None:
lowerCAmelCase__ : Any = CircularLinkedList()
assert len(SCREAMING_SNAKE_CASE_ ) == 0
assert circular_linked_list.is_empty() is True
assert str(SCREAMING_SNAKE_CASE_ ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(SCREAMING_SNAKE_CASE_ ) == i
circular_linked_list.insert_nth(SCREAMING_SNAKE_CASE_ , i + 1 )
assert str(SCREAMING_SNAKE_CASE_ ) == "->".join(str(SCREAMING_SNAKE_CASE_ ) for i in range(1 , 6 ) )
circular_linked_list.insert_tail(6 )
assert str(SCREAMING_SNAKE_CASE_ ) == "->".join(str(SCREAMING_SNAKE_CASE_ ) for i in range(1 , 7 ) )
circular_linked_list.insert_head(0 )
assert str(SCREAMING_SNAKE_CASE_ ) == "->".join(str(SCREAMING_SNAKE_CASE_ ) for i in range(0 , 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(SCREAMING_SNAKE_CASE_ ) == "->".join(str(SCREAMING_SNAKE_CASE_ ) for i in range(1 , 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 , 3 )
assert str(SCREAMING_SNAKE_CASE_ ) == "->".join(str(SCREAMING_SNAKE_CASE_ ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 357
|
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCamelCase__ = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> int:
require_version(deps[pkg] , SCREAMING_SNAKE_CASE_ )
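# Hedged example (specifier assumed, not read from the table): `deps` maps package
# names to version specifiers, so with deps["tqdm"] == "tqdm>=4.27" the helper above
# resolves to require_version("tqdm>=4.27", hint), which raises if the installed tqdm
# does not satisfy the specifier, appending the optional hint to the error message.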
| 307
| 0
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {"""tokenizer_file""": """tokenizer.json"""}
lowerCamelCase__ = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class A__ ( __magic_name__ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = ['input_ids', 'attention_mask']
lowercase = None
def __init__( self : Optional[Any] , a : Tuple=None , a : int=None , a : int=None , a : Any="<unk>" , a : Tuple="<s>" , a : Optional[Any]="</s>" , a : Optional[int]="<pad>" , a : Dict=False , a : Union[str, Any]=False , **a : List[Any] , ):
'''simple docstring'''
super().__init__(
a , a , tokenizer_file=a , unk_token=a , bos_token=a , eos_token=a , pad_token=a , add_prefix_space=a , clean_up_tokenization_spaces=a , **a , )
lowerCAmelCase__ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , a ) != add_prefix_space:
lowerCAmelCase__ : Any = getattr(a , pre_tok_state.pop('type' ) )
lowerCAmelCase__ : Union[str, Any] = add_prefix_space
lowerCAmelCase__ : Dict = pre_tok_class(**a )
lowerCAmelCase__ : List[str] = add_prefix_space
def _lowerCamelCase ( self : List[str] , *a : List[Any] , **a : str ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = kwargs.get('is_split_into_words' , a )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
' pretokenized inputs.' )
return super()._batch_encode_plus(*a , **a )
def _lowerCamelCase ( self : List[str] , *a : Optional[Any] , **a : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : int = kwargs.get('is_split_into_words' , a )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
' pretokenized inputs.' )
return super()._encode_plus(*a , **a )
def _lowerCamelCase ( self : Any , a : str , a : Optional[str] = None ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self._tokenizer.model.save(a , name=a )
return tuple(a )
def _lowerCamelCase ( self : List[Any] , a : "Conversation" ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(a , add_special_tokens=a ) + [self.eos_token_id] )
if len(a ) > self.model_max_length:
lowerCAmelCase__ : Any = input_ids[-self.model_max_length :]
return input_ids
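# Hedged sketch of the conversation encoding above: each turn is encoded without
# special tokens, terminated with eos_token_id, concatenated in order, and finally
# truncated from the left so only the most recent model_max_length tokens survive.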
| 358
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class A__ ( __magic_name__ ):
lowercase = (DPMSolverSDEScheduler,)
lowercase = 10
def _lowerCamelCase ( self : Optional[int] , **a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = {
'num_train_timesteps': 1_100,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'noise_sampler_seed': 0,
}
config.update(**a )
return config
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=a , beta_end=a )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=a )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = self.scheduler_classes[0]
lowerCAmelCase__ : Tuple = self.get_scheduler_config()
lowerCAmelCase__ : List[Any] = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ : Dict = self.dummy_model()
lowerCAmelCase__ : int = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ : int = sample.to(a )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : List[Any] = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : str = model(a , a )
lowerCAmelCase__ : int = scheduler.step(a , a , a )
lowerCAmelCase__ : Any = output.prev_sample
lowerCAmelCase__ : List[Any] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Optional[int] = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_6_7.4_7_8_2_1_0_4_4_9_2_1_8_7_5 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_7_8_7_0_5_9_6_4_5_6_5_2_7_7 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_1.5_9_3_5_2_1_1_1_8_1_6_4_0_6 ) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_6_8_9_2_2_9_9_6_5_2 ) < 1E-3
else:
assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.scheduler_classes[0]
lowerCAmelCase__ : List[str] = self.get_scheduler_config(prediction_type='v_prediction' )
lowerCAmelCase__ : Any = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ : Optional[int] = self.dummy_model()
lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ : Any = sample.to(a )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : str = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : str = model(a , a )
lowerCAmelCase__ : Dict = scheduler.step(a , a , a )
lowerCAmelCase__ : Tuple = output.prev_sample
lowerCAmelCase__ : int = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Union[str, Any] = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_2_4.7_7_1_4_9_2_0_0_4_3_9_4_5_3 ) < 1E-2
assert abs(result_mean.item() - 0.1_6_2_2_6_2_8_9_0_1_4_8_1_6_2_8_4 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_2_8.1_6_6_3_3_6_0_5_9_5_7_0_3 ) < 1E-2
assert abs(result_mean.item() - 0.1_6_6_8_8_3_2_6_0_0_1_1_6_7_2_9_7 ) < 1E-3
else:
assert abs(result_sum.item() - 1_1_9.8_4_8_7_5_4_8_8_2_8_1_2_5 ) < 1E-2
assert abs(result_mean.item() - 0.1_5_6_0_5_3_0_6_6_2_5_3_6_6_2_1 ) < 1E-3
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.scheduler_classes[0]
lowerCAmelCase__ : Optional[int] = self.get_scheduler_config()
lowerCAmelCase__ : int = scheduler_class(**a )
scheduler.set_timesteps(self.num_inference_steps , device=a )
lowerCAmelCase__ : Tuple = self.dummy_model()
lowerCAmelCase__ : Any = self.dummy_sample_deter.to(a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCAmelCase__ : Dict = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : Optional[int] = model(a , a )
lowerCAmelCase__ : Tuple = scheduler.step(a , a , a )
lowerCAmelCase__ : Dict = output.prev_sample
lowerCAmelCase__ : Union[str, Any] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Dict = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_6_7.4_6_9_5_7_3_9_7_4_6_0_9_3_8 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_8_0_5_9_3_4_6_0_7_9_8_2_6_3_5 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_1.5_9_3_5_3_6_3_7_6_9_5_3_1_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_8_3_8_2_4_1_5_7_7_1 ) < 1E-3
else:
assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.scheduler_classes[0]
lowerCAmelCase__ : Any = self.get_scheduler_config()
lowerCAmelCase__ : Any = scheduler_class(**a , use_karras_sigmas=a )
scheduler.set_timesteps(self.num_inference_steps , device=a )
lowerCAmelCase__ : str = self.dummy_model()
lowerCAmelCase__ : Any = self.dummy_sample_deter.to(a ) * scheduler.init_noise_sigma
lowerCAmelCase__ : str = sample.to(a )
for t in scheduler.timesteps:
lowerCAmelCase__ : Any = scheduler.scale_model_input(a , a )
lowerCAmelCase__ : int = model(a , a )
lowerCAmelCase__ : Union[str, Any] = scheduler.step(a , a , a )
lowerCAmelCase__ : Union[str, Any] = output.prev_sample
lowerCAmelCase__ : Optional[int] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Any = torch.mean(torch.abs(a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_7_6.6_6_9_7_4_1_3_5_7_4_2_1_8_8 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_7_7.6_3_6_5_3_5_6_4_4_5_3_1_2_5 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
else:
assert abs(result_sum.item() - 1_7_0.3_1_3_5_2_2_3_3_8_8_6_7_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
| 307
| 0
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowerCamelCase__ = imread(r"""digital_image_processing/image_data/lena_small.jpg""")
lowerCamelCase__ = cvtColor(img, COLOR_BGR2GRAY)
def lowerCAmelCase__ ( ) -> Dict:
lowerCAmelCase__ : List[Any] = cn.convert_to_negative(SCREAMING_SNAKE_CASE_ )
# assert negative_img array for at least one True
assert negative_img.any()
def lowerCAmelCase__ ( ) -> Optional[Any]:
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(SCREAMING_SNAKE_CASE_ , 110 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def lowerCAmelCase__ ( ) -> Tuple:
lowerCAmelCase__ : str = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def lowerCAmelCase__ ( ) -> Tuple:
lowerCAmelCase__ : Tuple = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
lowerCAmelCase__ : Optional[Any] = canny.canny(SCREAMING_SNAKE_CASE_ )
# assert canny array for at least one True
assert canny_array.any()
def lowerCAmelCase__ ( ) -> Optional[int]:
assert gg.gaussian_filter(SCREAMING_SNAKE_CASE_ , 5 , sigma=0.9 ).all()
def lowerCAmelCase__ ( ) -> Dict:
# laplace diagonals
lowerCAmelCase__ : Union[str, Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
lowerCAmelCase__ : int = conv.img_convolve(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).astype(SCREAMING_SNAKE_CASE_ )
assert res.any()
def lowerCAmelCase__ ( ) -> List[str]:
assert med.median_filter(SCREAMING_SNAKE_CASE_ , 3 ).any()
def lowerCAmelCase__ ( ) -> Any:
lowerCAmelCase__ : str = sob.sobel_filter(SCREAMING_SNAKE_CASE_ )
assert grad.any() and theta.any()
def lowerCAmelCase__ ( ) -> Any:
lowerCAmelCase__ : int = sp.make_sepia(SCREAMING_SNAKE_CASE_ , 20 )
assert sepia.all()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = "digital_image_processing/image_data/lena_small.jpg" ) -> Optional[Any]:
lowerCAmelCase__ : List[Any] = bs.Burkes(imread(SCREAMING_SNAKE_CASE_ , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = "digital_image_processing/image_data/lena_small.jpg" , ) -> Any:
lowerCAmelCase__ : Dict = rs.NearestNeighbour(imread(SCREAMING_SNAKE_CASE_ , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def lowerCAmelCase__ ( ) -> int:
lowerCAmelCase__ : int = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
lowerCAmelCase__ : List[str] = imread(SCREAMING_SNAKE_CASE_ , 0 )
# Test for get_neighbors_pixel function() return not None
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : List[str] = image[x_coordinate][y_coordinate]
lowerCAmelCase__ : Dict = lbp.get_neighbors_pixel(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
lowerCAmelCase__ : List[str] = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
lowerCAmelCase__ : Dict = lbp.local_binary_value(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert lbp_image.any()
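

# A minimal direct runner for the tests above -- a sketch, assuming the
# `digital_image_processing` package and the Lena sample images are on the
# path; normally these functions are collected and run by pytest instead.
if __name__ == "__main__":
    test_convert_to_negative()
    test_sepia()
    test_local_binary_pattern()
    print("smoke tests passed")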
import os
import string
import sys

ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
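
# Arrow keys arrive from a terminal as the sequence ESC [ A..D (note that
# mod_int, 91, is ord("[")). Adding ARROW_KEY_FLAG (1 << 8) to the final
# byte lifts the arrow codes above the 0-255 range so they cannot collide
# with ordinary single characters.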
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Read raw characters from stdin, one keystroke at a time."""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            # Put the terminal in raw mode so the read returns immediately,
            # then restore the previous settings afterwards.
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Read a key from stdin, translating escape sequences for arrow keys."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
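

# A minimal interactive sketch (an assumption, not part of the module): read
# keys until Ctrl-C, printing arrow-key names decoded via ARROW_KEY_FLAG.
if __name__ == "__main__":
    ARROW_NAMES = {
        KEYMAP["up"]: "up",
        KEYMAP["down"]: "down",
        KEYMAP["right"]: "right",
        KEYMAP["left"]: "left",
    }
    while True:
        key = get_character()
        if key == chr(KEYMAP["interrupt"]):
            break
        if isinstance(key, str) and ord(key) in ARROW_NAMES:
            print(ARROW_NAMES[ord(key)])
        else:
            print(repr(key))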
"""simple docstring"""
lowerCamelCase__ = """Input must be a string of 8 numbers plus letter"""
lowerCamelCase__ = """TRWAGMYFPDXBNJZSQVHLCKE"""
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> bool:
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : List[str] = F'''Expected string as input, found {type(SCREAMING_SNAKE_CASE_ ).__name__}'''
raise TypeError(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = spanish_id.replace('-' , '' ).upper()
if len(SCREAMING_SNAKE_CASE_ ) != 9:
raise ValueError(SCREAMING_SNAKE_CASE_ )
try:
lowerCAmelCase__ : Dict = int(spanish_id_clean[0:8] )
lowerCAmelCase__ : int = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(SCREAMING_SNAKE_CASE_ ) from ex
if letter.isdigit():
raise ValueError(SCREAMING_SNAKE_CASE_ )
return letter == LOOKUP_LETTERS[number % 23]
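

# Worked example of the mod-23 check digit: 12345678 % 23 == 14, and
# LOOKUP_LETTERS[14] == "Z", so "12345678Z" is accepted (see the doctests
# above) while the same digits with any other trailing letter are rejected.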
if __name__ == "__main__":
import doctest
doctest.testmod()
def base16_encode(data: bytes) -> str:
    """
    Encode bytes as an uppercase base16 (hex) string.

    >>> base16_encode(b"Hello")
    '48656C6C6F'
    """
    # Turn each byte into two uppercase hex digits and join them together.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """
    Decode an uppercase base16 (hex) string back into bytes.

    >>> base16_decode("48656C6C6F")
    b'Hello'
    """
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
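

# Round-trip sketch: encoding then decoding returns the original bytes,
# e.g. b"Hello" -> "48656C6C6F" -> b"Hello".
assert base16_decode(base16_encode(b"Hello")) == b"Hello"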
if __name__ == "__main__":
import doctest
doctest.testmod()