import sys
import turtle


def get_mid(p1, p2):
    """Return the midpoint of two points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(vertex1, vertex2, vertex3, depth):
    """Draw a triangle, then recursively subdivide it ``depth`` more times."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")

    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
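
# Usage sketch (illustrative, not part of the original file):
#   python fractals.py 4
# Depth 0 draws a single triangle; each extra level recurses into the three
# corner sub-triangles, so the drawing tripples in triangle count per level.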
from __future__ import annotations

from typing import Any


class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        """
        Arguments:
            num_of_nodes - the number of nodes in the graph
        Attributes:
            m_num_of_nodes - the number of nodes in the graph.
            m_edges - the list of edges.
            m_component - the dictionary which stores the index of the
            component which a node belongs to.
        """
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Adds an edge in the format [first, second, edge weight] to the graph."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Returns the component index of a given node."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Propagates a new component index throughout a given component."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Finds the roots of components for two nodes, compares the components
        in terms of size, and attaches the smaller one to the larger one to
        form a single component."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Performs Boruvka's algorithm to find the minimum spanning tree."""
        # Initialize additional lists required for the algorithm.
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    # Track the cheapest edge leaving each of the two components.
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


def test_vector() -> None:
    """Doctest entry point; the original examples were elided."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
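
# Minimal usage sketch for the class above (the edge weights here are
# illustrative, not from the original file):
#   g = Graph(4)
#   for u, v, w in ((0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 3, 4)):
#       g.add_edge(u, v, w)
#   g.boruvka()  # picks edges (2,3), (0,3) and (0,1) for a total MST weight of 19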
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]


def next_number(number: int) -> int:
    """Return the next number in the chain: the sum of the squares of the digits."""
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000

    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True  # the chain of 1 ends with 1
CHAINS[57] = False  # the chain of 58 ends with 89


def chain(number: int) -> bool:
    """Return True if the chain of ``number`` ends with 1, False if it ends with 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    """Count how many starting numbers below ``number`` produce a chain arriving at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
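
# Worked example of the two possible chains (easy to verify by hand):
#   44 -> 4^2 + 4^2 = 32 -> 13 -> 10 -> 1                                 (arrives at 1)
#   85 -> 8^2 + 5^2 = 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89  (arrives at 89)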
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_autoformer import (
        AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AutoformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_autoformer import (
            AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            AutoformerForPrediction,
            AutoformerModel,
            AutoformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def solution(limit: int = 1_000_000) -> int:
    """
    Project Euler 135: count the values of n below the limit for which
    x^2 - y^2 - z^2 == n (with x, y, z in arithmetic progression) has
    exactly ten distinct solutions.
    """
    limit = limit + 1
    frequency = [0] * limit

    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a

    count = sum(1 for x in frequency[1:limit] if x == 10)

    return count


if __name__ == "__main__":
    print(f"{solution() = }")
import math


def solution(n: int = 100) -> int:
    """
    Return the difference between the square of the sum of the first ``n``
    natural numbers and the sum of their squares.
    """
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
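
# Sanity check using the standard closed forms sum(1..n) = n(n+1)/2 and
# sum of squares = n(n+1)(2n+1)/6: for n = 10 this gives 55**2 - 385 = 2640,
# which matches solution(10).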
import unittest

from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        FalconForCausalLM,
        FalconForQuestionAnswering,
        FalconForSequenceClassification,
        FalconForTokenClassification,
        FalconModel,
    )


class FalconModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return FalconConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=1,
            new_decoder_architecture=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = FalconModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_position_embedding_types(self):
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)

    def test_falcon_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_falcon_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_cache_conversion(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict["input_ids"]
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, use_cache=True)
        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size)
        for layer in range(len(rw_cache)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx])
                )

    def test_falcon_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_past_key_values_format(self):
        # Falcon can have different numbers of KV-heads than the number of query heads, so we need
        # to override this test to use the right head counts.
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

            # If it doesn't support cache, pass the test
            if not hasattr(config, "use_cache"):
                return

            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
                inputs["use_cache"] = True
            outputs = model(**inputs)

            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return

            num_hidden_layers = (
                getattr(config, "decoder_layers", None)
                or getattr(config, "num_decoder_layers", None)
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads)
            embed_dim = getattr(config, "d_model", config.hidden_size)
            per_head_embed_dim = embed_dim // num_attention_heads

            past_kv = outputs["past_key_values"]
            self.assertEqual(len(past_kv), num_hidden_layers)

            batch_size, seq_length = inputs["input_ids"].shape
            for i in range(num_hidden_layers):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0]), 2)  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
                self.assertEqual(
                    past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )


@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_falcon(self):
        tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
        model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
        model.eval()
        model.to(torch_device)
        inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

        EXPECTED_OUTPUT = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )

        output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]

        self.assertEqual(output_str, EXPECTED_OUTPUT)

    @slow
    def test_lm_generation_big_models(self):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)

    @slow
    def test_lm_generation_use_cache(self):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial, given its coefficients from lowest to highest degree."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's method (one multiply-add per coefficient)."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)  # f(x) = 7.0x^4 + 9.3x^3 + 5.0x^2
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
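
# Horner's method factors f(x) = 7x^4 + 9.3x^3 + 5x^2 as
# (((7*x + 9.3)*x + 5)*x + 0)*x + 0, using one multiply-add per coefficient,
# so both functions above print 79800.0 for x = 10.0.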
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/realm-cc-news-pretrained-embedder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-encoder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-scorer": (
        "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-openqa": (
        "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json"
    ),
    "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
    "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
    "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
    "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
    # See all REALM models at https://huggingface.co/models?filter=realm
}


class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV1ImageProcessor


class MobileNetV1ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}


class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD


torch.set_grad_enabled(False)


def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ]
        )

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def remove_projection_head(state_dict):
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
    # used to compute the property `self.chunk_length`
    "EncodecConfig": ["overlap"],
    # used as `self.bert_model = BertModel(config, ...)`
    "DPRConfig": True,
    # not used in modeling files, but it's an important information
    "FSMTConfig": ["langs"],
    # used internally in the configuration class file
    "GPTNeoConfig": ["attention_types"],
    # used internally in the configuration class file
    "EsmConfig": ["is_folding_model"],
    # used during training (despite we don't have training script for these models yet)
    "Mask2FormerConfig": ["ignore_value"],
    # `ignore_value` used during training (despite we don't have training script for these models yet)
    # `norm` used in conversion script (despite not using in the modeling file)
    "OneFormerConfig": ["ignore_value", "norm"],
    # used during preprocessing and collation, see `collating_graphormer.py`
    "GraphormerConfig": ["spatial_pos_max"],
    # used internally in the configuration class file
    "T5Config": ["feed_forward_proj"],
    # used internally in the configuration class file
    # `tokenizer_class` get default value `T5Tokenizer` intentionally
    "MT5Config": ["feed_forward_proj", "tokenizer_class"],
    "UMT5Config": ["feed_forward_proj", "tokenizer_class"],
    # used internally in the configuration class file
    "LongT5Config": ["feed_forward_proj"],
    # used internally in the configuration class file
    "SwitchTransformersConfig": ["feed_forward_proj"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "BioGptConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "GLPNConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "SegformerConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "CvtConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "PerceiverConfig": ["layer_norm_eps"],
    # used internally to calculate the feature size
    "InformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate the feature size
    "TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate the feature size
    "AutoformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate `mlp_dim`
    "SamVisionConfig": ["mlp_ratio"],
    # For (head) training, but so far not implemented
    "ClapAudioConfig": ["num_classes"],
    # Not used, but providing useful information to users
    "SpeechT5HifiGanConfig": ["sampling_rate"],
}

# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
    {
        "CLIPSegConfig": True,
        "DeformableDetrConfig": True,
        "DetaConfig": True,
        "DinatConfig": True,
        "DonutSwinConfig": True,
        "EfficientFormerConfig": True,
        "FSMTConfig": True,
        "JukeboxConfig": True,
        "LayoutLMv2Config": True,
        "MaskFormerSwinConfig": True,
        "MT5Config": True,
        "NatConfig": True,
        "OneFormerConfig": True,
        "PerceiverConfig": True,
        "RagConfig": True,
        "SpeechT5Config": True,
        "SwinConfig": True,
        "Swin2SRConfig": True,
        "Swinv2Config": True,
        "SwitchTransformersConfig": True,
        "TableTransformerConfig": True,
        "TapasConfig": True,
        "TransfoXLConfig": True,
        "UniSpeechConfig": True,
        "UniSpeechSatConfig": True,
        "WavLMConfig": True,
        "WhisperConfig": True,
        # TODO: @Arthur (for `alignment_head` and `alignment_layer`)
        "JukeboxPriorConfig": True,
        # TODO: @Younes (for `is_decoder`)
        "Pix2StructTextConfig": True,
    }
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Return True if any of the attribute variants is used or explicitly allowed."""
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True

            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed


def check_config_attributes_being_used(config_class):
    """Return the sorted list of `__init__` parameters not used in the modeling files."""
    # Get the parameters in `__init__` of the configuration class, and the default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)


def check_config_attributes():
    """Raise a ValueError listing every configuration class with unused attributes."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"

        raise ValueError(error)


if __name__ == "__main__":
    check_config_attributes()
def hamming_distance(string1: str, string2: str) -> int:
    """Return the number of positions at which two equal-length strings differ."""
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")

    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1

    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
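
# Example (a classic Hamming-distance illustration):
#   hamming_distance("karolin", "kathrin")  ->  3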
__A : Optional[Any] = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
__A : List[Any] = {value: key for key, value in encode_dict.items()}
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Dict:
'''simple docstring'''
lowerCAmelCase : int = """"""
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('encode() accepts only letters of the alphabet and spaces' )
return encoded
def decode(coded: str) -> str:
    """Decode a string of 'A'/'B' groups (words separated by spaces) back to text."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
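    # Round-trip sanity check (illustrative input chosen here, not from the original file):
    # decode(encode(s)) reproduces the lower-cased input for letters and spaces.
    assert decode(encode("hello world")) == "hello world"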
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=384,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=128,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
    '''--preprocessing_num_workers''', type=int, default=4, help='''Number of processes to use for preprocessing the dataset.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
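# EXPLICIT_BATCH makes the network definition carry an explicit batch dimension,
# which TensorRT's ONNX parser requires (the legacy implicit-batch mode cannot parse ONNX).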
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
        # serialize_engine and store in file (can be directly loaded and deserialized):
        with open(engine_name, "wb") as f:
            f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets["validation"].column_names
question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and makes the
    # truncation of the context fail (the tokenized question would take a lot of space). Strip it.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []
    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]
    return tokenized_examples
eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)

data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase="eval" ) -> int:
'''simple docstring'''
lowerCAmelCase : str = postprocess_qa_predictions(
examples=_UpperCAmelCase, features=_UpperCAmelCase, predictions=_UpperCAmelCase, version_2_with_negative=args.version_2_with_negative, n_best_size=args.n_best_size, max_answer_length=args.max_answer_length, null_score_diff_threshold=args.null_score_diff_threshold, output_dir=args.output_dir, prefix=_UpperCAmelCase, )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
lowerCAmelCase : Union[str, Any] = [
{'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
]
else:
lowerCAmelCase : List[Any] = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]
lowerCAmelCase : Optional[Any] = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=_UpperCAmelCase, label_ids=_UpperCAmelCase )
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
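    # trt.volume() multiplies the dimensions of the binding's shape, and itemsize converts
    # the element count into bytes: exactly the device-buffer size each binding needs.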
# Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffers
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(F' Num examples = {len(eval_dataset)}')
logger.info(F' Batch size = {args.per_device_eval_batch_size}')
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 1000 / niter))
logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 1000))
logger.info('''Total Number of Inference = %d''', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F'Evaluation metrics: {eval_metric}')
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''shi-labs/dinat-mini-in1k-224''': '''https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json''',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
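# Illustrative usage (the values follow from the defaults above, not from the original file):
# the derived `hidden_size` is embed_dim * 2 ** (num_stages - 1).
#
#   config = DinatConfig()
#   assert config.hidden_size == 64 * 2 ** 3  # 512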
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges; the weight is optional and handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; pass -1 (or nothing) for a random count from 10 to 10010
    def fill_graph(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class Graph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges; the weight is optional and handles repetition
    def add_pair(self, u, v, w=1):
        # check if u exists
        if self.graph.get(u):
            # if there already is an edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is an edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; pass -1 (or nothing) for a random count from 10 to 10010
    def fill_graph(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
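# Minimal illustrative usage of the two classes above (node values chosen here,
# not part of the original module):
if __name__ == "__main__":
    dg = DirectedGraph()
    dg.add_pair(0, 1)
    dg.add_pair(1, 2)
    dg.add_pair(2, 0)  # closes the cycle 0 -> 1 -> 2 -> 0
    print(dg.dfs())        # [0, 1, 2]
    print(dg.has_cycle())  # True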
from manim import *
class Stage5(Scene):
    # NOTE: the direction constants (UP/DOWN/LEFT/RIGHT) and the colors used below are
    # plausible reconstructions; the exact values were lost in this copy of the file.
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        meta_mem = Rectangle(height=0.25, width=0.25)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(UP, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []
        for i, rect in enumerate(model_base):
            target = fill.copy().set_fill(BLUE, opacity=0.8)
            target.move_to(rect)
            model_arr.append(target)

            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(BLUE, opacity=0.8)
            cpu_target.move_to(cpu_left_col_base[i])
            model_cpu_arr.append(cpu_target)
        self.add(*model_arr, *model_cpu_arr)

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4, -1.25, 0])
        self.add(disk_text, disk_rects)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_1 = MarkupText(
            f"Now watch as an input is passed through the model\nand how the memory is utilized and handled.",
            font_size=24,
        )
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1))

        input = Square(0.3)
        input.set_fill(GREEN, opacity=1.0)
        input.set_stroke(width=0.0)
        input.next_to(model_base[0], LEFT, buff=0.5)
        self.play(Write(input))

        input.generate_target()
        input.target.next_to(model_arr[0], direction=LEFT, buff=0.02)
        self.play(MoveToTarget(input))
        self.play(FadeOut(step_1))

        a = Arrow(start=UP, end=DOWN, color=GREEN, buff=0.5)
        a.next_to(model_arr[0].get_left(), UP, buff=0.2)
        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0])

        step_2 = MarkupText(
            f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.",
            font_size=24,
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2, run_time=3))

        circ_kwargs = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}
        self.play(
            Write(a),
            Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
        )
        self.play(MoveToTarget(model_cpu_arr[0]))

        a_c = a.copy()
        for i in range(6):
            a_c.next_to(model_arr[i].get_right() + 0.02, UP, buff=0.2)

            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02)

            grp = AnimationGroup(
                FadeOut(a, run_time=0.5), MoveToTarget(input, run_time=0.5), FadeIn(a_c, run_time=0.5), lag_ratio=0.2
            )
            self.play(grp)

            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i])

            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
                if i >= 1:
                    circ_kwargs["run_time"] = 0.7
                self.play(
                    Circumscribe(model_arr[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i + 1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                    Circumscribe(model_arr[i + 1], color=ORANGE, **circ_kwargs),
                )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i]),
                        MoveToTarget(model_cpu_arr[i + 1]),
                    )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i], run_time=0.7),
                        MoveToTarget(model_cpu_arr[i + 1], run_time=0.7),
                    )
            else:
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right(), RIGHT + 0.02, buff=0.2)
                self.play(
                    Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                )
                self.play(MoveToTarget(model_cpu_arr[i]))

            a = a_c
            a_c = a_c.copy()

        input.generate_target()
        input.target.next_to(model_base[-1], RIGHT + 0.02, buff=0.5)
        self.play(
            FadeOut(step_2),
            FadeOut(a_c, run_time=0.5),
        )

        step_3 = MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed.", font_size=24)
        step_3.move_to([2, 2, 0])
        self.play(Write(step_3, run_time=3), MoveToTarget(input))
        self.wait()
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g
def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
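# Sketch of why decryption works (derived from the construction above, not stated in this
# file): with e_2 = (e_1 ** d) ** -1 (mod p), a ciphertext (c_1, c_2) = (e_1**k % p,
# m * e_2**k % p) recovers m as c_2 * c_1**d % p, since e_2**k * e_1**(k*d) == 1 (mod p).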
def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()
    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")
def main() -> None:
print('Making key files...' )
make_key_files('elgamal', 2_048 )
print('Key files generation successful' )
if __name__ == "__main__":
main()
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Return all primes up to and including `num` using the sieve of Eratosthenes."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
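    # Illustrative check (value chosen here): the primes up to 10.
    assert prime_sieve_eratosthenes(10) == [2, 3, 5, 7]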
__A : Optional[int] = int(input('''Enter a positive integer: ''').strip())
print(prime_sieve_eratosthenes(user_num))
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )
# Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    # fit() accepts generators directly; fit_generator was removed in recent Keras versions.
    classifier.fit(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
# training_set.class_indices
    # The sigmoid output is a probability in (0, 1), so threshold it at 0.5 instead of
    # comparing to exact 0/1 (which would leave `prediction` undefined for most outputs).
    if result[0][0] >= 0.5:
        prediction = "Abnormality detected"
    else:
        prediction = "Normal"
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divide `number_of_bytes` into `partitions` contiguous, 1-indexed byte ranges."""
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
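    # Illustrative example (values chosen here): split 100 bytes across 4 partitions.
    print(allocation_num(100, 4))  # ['1-25', '26-50', '51-75', '76-100']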
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)
        self.encoder = BertEncoderWithPabee(config)
        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
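# Illustrative usage sketch (model name, batch, and patience value are assumptions,
# not part of this module):
#
#   model = BertForSequenceClassificationWithPabee.from_pretrained("bert-base-uncased", num_labels=2)
#   model.bert.set_patience(3)   # exit early once 3 consecutive classifiers agree
#   model.bert.reset_stats()
#   logits = model(input_ids=batch["input_ids"], attention_mask=batch["attention_mask"])[0]
#   model.bert.log_stats()       # prints average layers used and the resulting speed-up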
"""simple docstring"""
import re
def dna(dna: str) -> str:
    """Return the complementary DNA strand (A<->T, C<->G)."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
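    # Illustrative check (sequence chosen here): the complement of "GCTA" is "CGAT".
    assert dna("GCTA") == "CGAT"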
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__A : List[Any] = logging.get_logger(__name__)
__A : List[Any] = {
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(self, vocab_size=128100, hidden_size=1536, num_hidden_layers=24, num_attention_heads=24, intermediate_size=6144, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=0, initializer_range=0.02, layer_norm_eps=1e-7, relative_attention=False, max_relative_positions=-1, pad_token_id=0, position_biased_input=True, pos_att_type=None, pooler_dropout=0, pooler_hidden_act="gelu", **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
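# Usage sketch (illustrative only): instantiating the config and generating dummy
# ONNX inputs. The checkpoint name is one of the real DeBERTa-v2 checkpoints listed
# above; everything else is a plain use of the classes defined in this file.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    config = DebertaV2Config()  # defaults follow the deberta-v2-xlarge sizing above
    onnx_config = DebertaV2OnnxConfig(config)

    tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
    dummy = onnx_config.generate_dummy_inputs(preprocessor=tokenizer, framework=None)
    print(sorted(dummy.keys()))  # token_type_ids is dropped because type_vocab_size == 0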
"""simple docstring"""
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)

                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace("norm1", "layernorm1")
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace("norm2", "layernorm2")
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace("fc1", "linear_in")
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
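# Worked example (illustrative): tracing a checkpoint key through rename_key.
# "patch_embed.0.weight" hits the patch_embed branch (layer "0" -> "convolution1")
# and is then prefixed because "patch_embed" is still in the new name:
#
#     rename_key("patch_embed.0.weight", num_meta4D_last_stage=5)
#     # -> "efficientformer.patch_embed.convolution1.weight"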
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size}, crop_size={"height": crop_size, "width": crop_size}, resample=pillow_resamplings["bicubic"])
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ])
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1_000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1_3_1_2, 0.4_3_5_3, -1.0_4_9_9, -0.5_1_2_4, 0.4_1_8_3, -0.6_7_9_3, -1.3_7_7_7, -0.0_8_9_3, -0.7_3_5_8, -2.4_3_2_8])
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3_1_5_0, -1.5_4_5_6, -1.2_5_5_6, -0.8_4_9_6, -0.7_1_2_7, -0.7_8_9_7, -0.9_7_2_8, -0.3_0_5_2, 0.3_7_5_1, -0.3_1_2_7])
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0_2_8_3, -1.4_1_3_1, -0.5_6_4_4, -1.3_1_1_5, -0.5_7_8_5, -1.2_0_4_9, -0.7_5_2_8, 0.1_9_9_2, -0.3_8_2_2, -0.0_8_7_8])
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7")

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message="Add model", use_temp_dir=True, )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message="Add image processor", use_temp_dir=True, )
if __name__ == "__main__":
__A : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
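# Example invocation (illustrative; the script name and file paths are placeholders):
#
#     python convert_efficientformer_checkpoint.py \
#         --pytorch_model_path efficientformer_l1_300d.pth \
#         --config_file efficientformer_l1_config.json \
#         --pytorch_dump_path efficientformer-l1-300 \
#         --no-push_to_hub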
__A : Dict = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
__A : List[Any] = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
__A : Dict = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
__A : Optional[int] = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
__A : Optional[int] = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
__A : Tuple = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
__A : int = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
__A : Optional[Any] = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
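# Sanity-check sketch (illustrative only): every schedule in this file is meant to run
# from high noise (timestep 999) down to 0. Each list above rebinds the same name
# `__A`, so `__A` below refers to the last schedule defined. Feeding such a list to a
# scheduler (e.g. a diffusers `set_timesteps(timesteps=...)` overload) is an
# assumption -- not every scheduler version accepts explicit timesteps.
if __name__ == "__main__":
    schedule = __A
    assert schedule[0] == 999 and schedule[-1] == 0
    assert all(a > b for a, b in zip(schedule, schedule[1:])), "schedule must be strictly decreasing"
    print(f"{len(schedule)} denoising steps")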
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_unispeech'''] = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
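# Behavior sketch (illustrative): with the lazy module installed above, importing this
# package is cheap; the heavy modeling code is only loaded on first attribute access:
#
#     import transformers.models.unispeech as unispeech  # nothing heavy imported yet
#     model_cls = unispeech.UniSpeechForCTC              # now modeling_unispeech is loaded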
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png', threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object, {
                    'score': ANY(float),
                    'label': ANY(str),
                    'box': {'xmin': ANY(int), 'ymin': ANY(int), 'xmax': ANY(int), 'ymax': ANY(int)},
                })

        import datasets

        dataset = datasets.load_dataset('hf-internal-testing/fixtures_image_utils', 'image', split='test')
        batch = [
            Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png'),
            'http://images.cocodataset.org/val2017/000000039769.jpg',
            # RGBA
            dataset[0]['file'],
            # LA
            dataset[1]['file'],
            # L
            dataset[2]['file'],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object, {
                        'score': ANY(float),
                        'label': ANY(str),
                        'box': {'xmin': ANY(int), 'ymin': ANY(int), 'xmax': ANY(int), 'ymax': ANY(int)},
                    })
@require_tf
@unittest.skip('Object detection not implemented in TF' )
    def test_small_model_tf(self):
        pass
@require_torch
    def test_small_model_pt(self):
        model_id = 'hf-internal-testing/tiny-detr-mobilenetsv3'
        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg', threshold=0.0)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.33_76, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.33_76, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
] , )
        outputs = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] , threshold=0.0 , )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'score': 0.33_76, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.33_76, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
[
{'score': 0.33_76, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.33_76, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        model_id = 'facebook/detr-resnet-50'
        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg')
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.99_82, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.99_60, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.99_55, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.99_88, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.99_87, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] , )
        outputs = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'score': 0.99_82, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.99_60, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.99_55, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.99_88, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.99_87, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.99_82, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.99_60, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.99_55, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.99_88, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.99_87, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
] , )
@require_torch
@slow
    def test_integration_torch_object_detection(self):
        model_id = 'facebook/detr-resnet-50'
        object_detector = pipeline('object-detection', model=model_id)

        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg')
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.99_82, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.99_60, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.99_55, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.99_88, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.99_87, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] , )
        outputs = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'score': 0.99_82, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.99_60, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.99_55, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.99_88, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.99_87, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.99_82, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.99_60, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.99_55, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.99_88, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.99_87, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
] , )
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.99_85
        model_id = 'facebook/detr-resnet-50'
        object_detector = pipeline('object-detection', model=model_id)

        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg', threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.99_88, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.99_87, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] , )
@require_torch
@require_pytesseract
@slow
    def test_layoutlm(self):
        model_id = 'Narsil/layoutlmv3-finetuned-funsd'
        threshold = 0.99_93
        object_detector = pipeline('object-detection', model=model_id, threshold=threshold)

        outputs = object_detector(
            'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png')
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.99_93, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
{'score': 0.99_93, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
] , )
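# Standalone usage sketch (illustrative only) of the pipeline exercised above, using
# the same facebook/detr-resnet-50 checkpoint as the slow tests:
if __name__ == "__main__":
    detector = pipeline('object-detection', model='facebook/detr-resnet-50')
    detections = detector('http://images.cocodataset.org/val2017/000000039769.jpg', threshold=0.9)
    for detection in detections:
        print(f"{detection['label']}: {detection['score']:.3f} at {detection['box']}")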
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        prediction_scores = model(inputs)['logits']
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size])

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base')
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])
        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
[
[
[-0.12_05_33_41, -1.0_26_49_01, 0.29_22_19_46],
[-1.5_13_37_83, 0.19_74_33, 0.15_19_06_07],
[-5.0_13_54_03, -3.90_02_56, -0.84_03_87_64],
]
] )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.00_00, 0.00_00, 0.00_00, 1.00_00, 1.00_00, 1.00_00], [0.84_15, 0.04_64, 0.00_22, 0.54_03, 0.99_89, 1.00_00]])
        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)
    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.00_00, 0.00_00, 0.00_00, 0.00_00, 0.00_00],
                [0.84_15, 0.82_19, 0.80_20, 0.78_19, 0.76_17],
                [0.90_93, 0.93_64, 0.95_81, 0.97_49, 0.98_70],
            ])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]
        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]
        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer)
        desired_query_layer = tf.constant(
[
[0.00_00, 0.01_00, 0.02_00, 0.03_00, 0.04_00, 0.05_00, 0.06_00, 0.07_00],
[-0.20_12, 0.88_97, 0.02_63, 0.94_01, 0.20_74, 0.94_63, 0.34_81, 0.93_43],
[-1.70_57, 0.62_71, -1.21_45, 1.38_97, -0.63_03, 1.76_47, -0.11_73, 1.89_85],
[-2.17_31, -1.63_97, -2.73_58, 0.28_54, -2.18_40, 1.71_83, -1.30_18, 2.48_71],
[0.27_17, -3.61_73, -2.92_06, -2.19_88, -3.66_38, 0.38_58, -2.91_55, 2.29_80],
[3.98_59, -2.15_80, -0.79_84, -4.49_04, -4.11_81, -2.02_52, -4.47_82, 1.12_53],
] )
        desired_key_layer = tf.constant(
[
[0.00_00, -0.01_00, -0.02_00, -0.03_00, -0.04_00, -0.05_00, -0.06_00, -0.07_00],
[0.20_12, -0.88_97, -0.02_63, -0.94_01, -0.20_74, -0.94_63, -0.34_81, -0.93_43],
[1.70_57, -0.62_71, 1.21_45, -1.38_97, 0.63_03, -1.76_47, 0.11_73, -1.89_85],
[2.17_31, 1.63_97, 2.73_58, -0.28_54, 2.18_40, -1.71_83, 1.30_18, -2.48_71],
[-0.27_17, 3.61_73, 2.92_06, 2.19_88, 3.66_38, -0.38_58, 2.91_55, -2.29_80],
[-3.98_59, 2.15_80, 0.79_84, 4.49_04, 4.11_81, 2.02_52, 4.47_82, -1.12_53],
] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance)
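# Reference sketch (illustrative, not the library implementation): the rotary update
# verified above rotates each (even, odd) feature pair by a position-dependent angle.
# With sin/cos taken from the sinusoidal table, both query and key are transformed as:
#
#     rotated_even = x_even * cos - x_odd * sin
#     rotated_odd  = x_odd  * cos + x_even * sin
#
# matching the rotate-half formulation used by apply_rotary_position_embeddings.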
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task='fill-mask', model='sshleifer/tiny-distilroberta-base', top_k=2, framework='tf')
        outputs = unmasker('My name is <mask>')
        self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{'sequence': 'My name is grouped', 'score': 2.1E-05, 'token': 38015, 'token_str': ' grouped'},
{'sequence': 'My name is accuser', 'score': 2.1E-05, 'token': 25506, 'token_str': ' accuser'},
] , )
        outputs = unmasker('The largest city in France is <mask>')
        self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{
'sequence': 'The largest city in France is grouped',
'score': 2.1E-05,
'token': 38015,
'token_str': ' grouped',
},
{
'sequence': 'The largest city in France is accuser',
'score': 2.1E-05,
'token': 25506,
'token_str': ' accuser',
},
] , )
        outputs = unmasker('My name is <mask>', targets=[' Patrick', ' Clara', ' Teven'], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{'sequence': 'My name is Clara', 'score': 2E-05, 'token': 13606, 'token_str': ' Clara'},
{'sequence': 'My name is Patrick', 'score': 2E-05, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 1.9E-05, 'token': 2941, 'token_str': ' Te'},
] , )
@require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task='fill-mask', model='sshleifer/tiny-distilroberta-base', top_k=2, framework='pt')
        outputs = unmasker('My name is <mask>')
        self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{'sequence': 'My name is Maul', 'score': 2.2E-05, 'token': 35676, 'token_str': ' Maul'},
{'sequence': 'My name isELS', 'score': 2.2E-05, 'token': 16416, 'token_str': 'ELS'},
] , )
        outputs = unmasker('The largest city in France is <mask>')
        self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{
'sequence': 'The largest city in France is Maul',
'score': 2.2E-05,
'token': 35676,
'token_str': ' Maul',
},
{'sequence': 'The largest city in France isELS', 'score': 2.2E-05, 'token': 16416, 'token_str': 'ELS'},
] , )
        outputs = unmasker('My name is <mask>', targets=[' Patrick', ' Clara', ' Teven'], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{'sequence': 'My name is Patrick', 'score': 2.1E-05, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 2E-05, 'token': 2941, 'token_str': ' Te'},
{'sequence': 'My name is Clara', 'score': 2E-05, 'token': 13606, 'token_str': ' Clara'},
] , )
        outputs = unmasker('My name is <mask> <mask>', top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6), [
[
{
'score': 2.2E-05,
'token': 35676,
'token_str': ' Maul',
'sequence': '<s>My name is Maul<mask></s>',
},
{'score': 2.2E-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'},
],
[
{
'score': 2.2E-05,
'token': 35676,
'token_str': ' Maul',
'sequence': '<s>My name is<mask> Maul</s>',
},
{'score': 2.2E-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'},
],
] , )
@require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline('fill-mask', model='hf-internal-testing/tiny-random-distilbert', device=0, framework='pt')

        # convert model to fp16
        pipe.model.half()

        response = pipe('Paris is the [MASK] of France.')
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)
@slow
@require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task='fill-mask', model='distilroberta-base', top_k=2, framework='pt')
        self.run_large_test(unmasker)
@slow
@require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task='fill-mask', model='distilroberta-base', top_k=2, framework='tf')
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker('My name is <mask>')
        self.assertEqual(
            nested_simplify(outputs), [
{'sequence': 'My name is John', 'score': 0.0_08, 'token': 610, 'token_str': ' John'},
{'sequence': 'My name is Chris', 'score': 0.0_07, 'token': 1573, 'token_str': ' Chris'},
] , )
        outputs = unmasker('The largest city in France is <mask>')
        self.assertEqual(
            nested_simplify(outputs), [
{
'sequence': 'The largest city in France is Paris',
'score': 0.2_51,
'token': 2201,
'token_str': ' Paris',
},
{
'sequence': 'The largest city in France is Lyon',
'score': 0.2_14,
'token': 12790,
'token_str': ' Lyon',
},
] , )
        outputs = unmasker('My name is <mask>', targets=[' Patrick', ' Clara', ' Teven'], top_k=3)
        self.assertEqual(
            nested_simplify(outputs), [
{'sequence': 'My name is Patrick', 'score': 0.0_05, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Clara', 'score': 0.0_00, 'token': 13606, 'token_str': ' Clara'},
{'sequence': 'My name is Te', 'score': 0.0_00, 'token': 2941, 'token_str': ' Te'},
] , )
@require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task='fill-mask', model='sshleifer/tiny-distilroberta-base', framework='pt')
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
@require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task='fill-mask', model='sshleifer/tiny-distilroberta-base', framework='tf')
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest('The provided tokenizer has no mask token, (probably reformer or wav2vec2)')

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs, [
                {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
            ])

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs, [
                {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
            ])

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs, [
                [
                    {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                    {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                    {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                    {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                    {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                ],
                [
                    {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                    {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                    {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                    {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                    {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                ],
            ])

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker('This is')

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs, [
                {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
            ])
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el['token'] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el['token_str'] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs, [
                {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
            ])
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el['token'] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el['token_str'] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask['token_str'] for top_mask in outputs]
        scores = [top_mask['score'] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask['score'] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[''])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets='')
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs, [
                {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
            ])

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2, [
                {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
                {'sequence': ANY(str), 'score': ANY(float), 'token': ANY(int), 'token_str': ANY(str)},
            ])
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)
        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        targets2 = [el['token_str'] for el in sorted(outputs, key=lambda x: x['score'], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(vocab):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] ):
lowerCAmelCase : List[Any] = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase )
lowerCAmelCase : Tuple = tokenizer.get_vocab()
# String duplicates + id duplicates
lowerCAmelCase : Union[str, Any] = sorted(vocab.keys() )[:3]
lowerCAmelCase : Tuple = [targets[0], targets[1], targets[0], targets[2], targets[1]]
lowerCAmelCase : Optional[int] = fill_masker(f"My name is {tokenizer.mask_token}" , targets=_lowercase , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(_lowercase ) , 3 )
def lowercase__ ( self : Dict , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any ):
lowerCAmelCase : List[Any] = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase )
lowerCAmelCase : int = fill_masker(
f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
_lowercase , [
[
{'sequence': ANY(_lowercase ), 'score': ANY(_lowercase ), 'token': ANY(_lowercase ), 'token_str': ANY(_lowercase )},
{'sequence': ANY(_lowercase ), 'score': ANY(_lowercase ), 'token': ANY(_lowercase ), 'token_str': ANY(_lowercase )},
],
[
{'sequence': ANY(_lowercase ), 'score': ANY(_lowercase ), 'token': ANY(_lowercase ), 'token_str': ANY(_lowercase )},
{'sequence': ANY(_lowercase ), 'score': ANY(_lowercase ), 'token': ANY(_lowercase ), 'token_str': ANY(_lowercase )},
],
[
{'sequence': ANY(_lowercase ), 'score': ANY(_lowercase ), 'token': ANY(_lowercase ), 'token_str': ANY(_lowercase )},
{'sequence': ANY(_lowercase ), 'score': ANY(_lowercase ), 'token': ANY(_lowercase ), 'token_str': ANY(_lowercase )},
],
] , )
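# Illustrative usage sketch of the behaviour the methods above pin down; the
# checkpoint name and concrete outputs are assumptions, not values from this
# test file, so the snippet is left commented out.
#
#   from transformers import pipeline
#   fill_masker = pipeline("fill-mask", model="distilroberta-base")
#   outputs = fill_masker("This is a <mask>.", top_k=2, targets=["test", "thing"])
#   # each entry: {"sequence": ..., "score": ..., "token": ..., "token_str": ...},
#   # filtered to the given targets and capped at top_k results per mask token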
| 370
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
        image_processor_map = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48145466, 0.4578275, 0.40821073],
            'image_std': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1E-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['input_ids', 'attention_mask', 'pixel_values'])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
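# Minimal usage sketch of the processor under test; the checkpoint name is an
# assumption, and the snippet is kept commented so the test module stays inert.
#
#   import numpy as np
#   from PIL import Image
#   from transformers import CLIPProcessor
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="np")
#   # inputs holds "input_ids", "attention_mask" and "pixel_values", as asserted above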
| 323
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    '''simple docstring'''
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title='subcommands', dest='subcommand')
    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])
    return config_parser
def main():
    '''simple docstring'''
    config_parser = get_config_parser()
    args = config_parser.parse_args()
    if not hasattr(args, 'func'):
        config_parser.print_help()
        exit(1)
    # Run
    args.func(args)
if __name__ == "__main__":
main()
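# Example invocations (sketch): this parser normally backs the `accelerate config`
# CLI entry point; the exact command forms below are assumptions based on the
# subcommands wired up above.
#
#   accelerate config            # interactive questionnaire (config_command_parser)
#   accelerate config default    # write a default config file (default_command_parser)
#   accelerate config update     # migrate an existing config file (update_command_parser)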
| 371
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
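# What the _LazyModule indirection above buys (sketch): importing the package is
# cheap, and the heavy framework-specific submodules load only on first attribute
# access.
#
#   import transformers.models.xlm_roberta as xlm_roberta  # no torch/tf/flax import yet
#   model_cls = xlm_roberta.XLMRobertaModel  # the torch modeling file is imported here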
| 323
| 0
|
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(graph, v, visited_forward, visited_backward, cst_fwd, cst_bwd, queue, parent, shortest_distance):
    '''simple docstring'''
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source, destination, graph_forward, graph_backward):
    '''simple docstring'''
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward, cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance)
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward, cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance)
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
graph_bwd = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
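# Usage sketch for the functions above. Reading the adjacency lists, the cheapest
# E -> F route is E -> G -> F with cost 2 + 1 = 3; that expected value is my own
# inference, not a stored doctest result.
if __name__ == "__main__":
    print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # expected: 3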
| 350
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)
@dataclass
class IFPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 323
| 0
|
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range)
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='ViTMSN does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    '''simple docstring'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained('facebook/vit-msn-small') if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
| 351
|
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    '''simple docstring'''
    return x + 2
class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = 'x = 3'
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {'x': 3})
        code = 'x = y'
        state = {'y': 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {'x': 5, 'y': 5})
    def test_evaluate_call(self):
        code = 'y = add_two(x)'
        state = {'x': 3}
        result = evaluate(code, {'add_two': add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {'x': 3, 'y': 5})
        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out
    def test_evaluate_constant(self):
        code = 'x = 3'
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {'x': 3})
    def test_evaluate_dict(self):
        code = 'test_dict = {\'x\': x, \'y\': add_two(x)}'
        state = {'x': 3}
        result = evaluate(code, {'add_two': add_two}, state=state)
        self.assertDictEqual(result, {'x': 3, 'y': 5})
        self.assertDictEqual(state, {'x': 3, 'test_dict': {'x': 3, 'y': 5}})
    def test_evaluate_expression(self):
        code = 'x = 3\ny = 5'
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {'x': 3, 'y': 5})
    def test_evaluate_f_string(self):
        code = 'text = f\'This is x: {x}.\''
        state = {'x': 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {'x': 3, 'text': 'This is x: 3.'})
    def test_evaluate_if(self):
        code = 'if x <= 3:\n y = 2\nelse:\n y = 5'
        state = {'x': 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {'x': 3, 'y': 2})
        state = {'x': 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {'x': 8, 'y': 5})
    def test_evaluate_list(self):
        code = 'test_list = [x, add_two(x)]'
        state = {'x': 3}
        result = evaluate(code, {'add_two': add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {'x': 3, 'test_list': [3, 5]})
    def test_evaluate_name(self):
        code = 'y = x'
        state = {'x': 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {'x': 3, 'y': 3})
    def test_evaluate_subscript(self):
        code = 'test_list = [x, add_two(x)]\ntest_list[1]'
        state = {'x': 3}
        result = evaluate(code, {'add_two': add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {'x': 3, 'test_list': [3, 5]})
        code = 'test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'
        state = {'x': 3}
        result = evaluate(code, {'add_two': add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {'x': 3, 'test_dict': {'x': 3, 'y': 5}})
    def test_evaluate_for(self):
        code = 'x = 0\nfor i in range(3):\n x = i'
        state = {}
        result = evaluate(code, {'range': range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {'x': 2, 'i': 2})
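# A minimal sketch of the interpreter contract these tests pin down: `evaluate`
# runs the snippet with the given tools, mutates `state` in place, and returns
# the value produced by the last statement.
if __name__ == "__main__":
    state = {}
    result = evaluate("x = 3\ny = 5", {}, state=state)
    print(result, state)  # 5 {'x': 3, 'y': 5}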
| 323
| 0
|
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning)
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                'You cannot provide bounding boxes '
                'if you initialized the image processor with apply_ocr set to True.')
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.')
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.')
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['words']
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['words'], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features['boxes'], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # add pixel values
        images = features.pop('pixel_values')
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs['overflow_to_sample_mapping'])
        encoded_inputs['image'] = images
        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}")
        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]
    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning)
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning)
        return self.image_processor
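# Usage sketch; the checkpoint name and the OCR behaviour (words/boxes coming from
# the LayoutLMv2 image processor) are assumptions, so the snippet stays commented.
#
#   from PIL import Image
#   from transformers import LayoutXLMProcessor
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   encoding = processor(Image.open("document.png").convert("RGB"), return_tensors="pt")
#   # encoding holds "input_ids", "bbox", "attention_mask" and "image", matching
#   # the model_input_names property above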
| 352
|
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    '''simple docstring'''
    if side_length < 0:
        raise ValueError('surface_area_cube() only accepts non-negative values')
    return 6 * side_length**2
def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    '''simple docstring'''
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError('surface_area_cuboid() only accepts non-negative values')
    return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere(radius: float) -> float:
    '''simple docstring'''
    if radius < 0:
        raise ValueError('surface_area_sphere() only accepts non-negative values')
    return 4 * pi * radius**2
def surface_area_hemisphere(radius: float) -> float:
    '''simple docstring'''
    if radius < 0:
        raise ValueError('surface_area_hemisphere() only accepts non-negative values')
    return 3 * pi * radius**2
def surface_area_cone(radius: float, height: float) -> float:
    '''simple docstring'''
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cone() only accepts non-negative values')
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    '''simple docstring'''
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            'surface_area_conical_frustum() only accepts non-negative values')
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def surface_area_cylinder(radius: float, height: float) -> float:
    '''simple docstring'''
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cylinder() only accepts non-negative values')
    return 2 * pi * radius * (height + radius)
def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    '''simple docstring'''
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError('surface_area_torus() only accepts non-negative values')
    if torus_radius < tube_radius:
        raise ValueError(
            'surface_area_torus() does not support spindle or self intersecting tori')
    return 4 * pow(pi, 2) * torus_radius * tube_radius
def area_rectangle(length: float, width: float) -> float:
    '''simple docstring'''
    if length < 0 or width < 0:
        raise ValueError('area_rectangle() only accepts non-negative values')
    return length * width
def area_square(side_length: float) -> float:
    '''simple docstring'''
    if side_length < 0:
        raise ValueError('area_square() only accepts non-negative values')
    return side_length**2
def area_triangle(base: float, height: float) -> float:
    '''simple docstring'''
    if base < 0 or height < 0:
        raise ValueError('area_triangle() only accepts non-negative values')
    return (base * height) / 2
def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    '''simple docstring'''
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError('area_triangle_three_sides() only accepts non-negative values')
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError('Given three sides do not form a triangle')
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3))
    return area
def area_parallelogram(base: float, height: float) -> float:
    '''simple docstring'''
    if base < 0 or height < 0:
        raise ValueError('area_parallelogram() only accepts non-negative values')
    return base * height
def area_trapezium(base1: float, base2: float, height: float) -> float:
    '''simple docstring'''
    if base1 < 0 or base2 < 0 or height < 0:
        raise ValueError('area_trapezium() only accepts non-negative values')
    return 1 / 2 * (base1 + base2) * height
def area_circle(radius: float) -> float:
    '''simple docstring'''
    if radius < 0:
        raise ValueError('area_circle() only accepts non-negative values')
    return pi * radius**2
def area_ellipse(radius_x: float, radius_y: float) -> float:
    '''simple docstring'''
    if radius_x < 0 or radius_y < 0:
        raise ValueError('area_ellipse() only accepts non-negative values')
    return pi * radius_x * radius_y
def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    '''simple docstring'''
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('area_rhombus() only accepts non-negative values')
    return 1 / 2 * diagonal_1 * diagonal_2
def area_reg_polygon(sides: int, length: float) -> float:
    '''simple docstring'''
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides')
    elif length < 0:
        raise ValueError(
            'area_reg_polygon() only accepts non-negative values as \
length of a side')
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(F'Rectangle: {area_rectangle(10, 20) = }')
print(F'Square: {area_square(10) = }')
print(F'Triangle: {area_triangle(10, 10) = }')
print(F'Triangle: {area_triangle_three_sides(5, 12, 13) = }')
print(F'Parallelogram: {area_parallelogram(10, 20) = }')
print(F'Rhombus: {area_rhombus(10, 20) = }')
print(F'Trapezium: {area_trapezium(10, 20, 30) = }')
print(F'Circle: {area_circle(20) = }')
print(F'Ellipse: {area_ellipse(10, 20) = }')
print('''\nSurface Areas of various geometric shapes: \n''')
print(F'Cube: {surface_area_cube(20) = }')
print(F'Cuboid: {surface_area_cuboid(10, 20, 30) = }')
print(F'Sphere: {surface_area_sphere(20) = }')
print(F'Hemisphere: {surface_area_hemisphere(20) = }')
print(F'Cone: {surface_area_cone(10, 20) = }')
print(F'Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }')
print(F'Cylinder: {surface_area_cylinder(10, 20) = }')
print(F'Torus: {surface_area_torus(20, 10) = }')
print(F'Equilateral Triangle: {area_reg_polygon(3, 10) = }')
print(F'Square: {area_reg_polygon(4, 10) = }')
    print(F'Regular Pentagon: {area_reg_polygon(5, 10) = }')
| 323
| 0
|
import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
"""n_samples""": 64,
"""horizon""": 32,
"""num_inference_steps""": 20,
"""n_guide_steps""": 2, # can set to 0 for faster sampling, does not use value network
"""scale_grad_by_std""": True,
"""scale""": 0.1,
"""eta""": 0.0,
"""t_grad_cutoff""": 2,
"""device""": """cpu""",
}
if __name__ == "__main__":
__A : Union[str, Any] = """hopper-medium-v2"""
__A : List[str] = gym.make(env_name)
__A : Optional[Any] = ValueGuidedRLPipeline.from_pretrained(
'''bglick13/hopper-medium-v2-value-function-hor32''',
env=env,
)
env.seed(0)
__A : List[str] = env.reset()
__A : Any = 0
__A : Dict = 0
__A : Optional[int] = 1000
__A : Tuple = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
__A : Optional[int] = pipeline(obs, planning_horizon=32)
# execute action in environment
__A : Optional[Any] = env.step(denorm_actions)
__A : int = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F'Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'
F' {total_score}'
)
# save observations for rendering
rollout.append(next_observation.copy())
__A : Optional[Any] = next_observation
except KeyboardInterrupt:
pass
print(F'Total reward: {total_reward}')
| 353
|
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int):
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}
    def add_edge(self, u_node: int, v_node: int, weight: int):
        self.m_edges.append([u_node, v_node, weight])
    def find_component(self, u_node: int):
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])
    def set_component(self, u_node: int):
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)
    def union(self, component_size: list[int], u_node: int, v_node: int):
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)
    def boruvka(self):
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")
def SCREAMING_SNAKE_CASE__ ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
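# Usage sketch for the Graph class above; the edge list is illustrative and the
# expected MST weight (5 + 4 + 10 = 19) is my own calculation for this graph.
if __name__ == "__main__":
    g = Graph(4)
    for u_node, v_node, weight in ((0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 3, 4)):
        g.add_edge(u_node, v_node, weight)
    g.boruvka()  # picks edges 0-3, 2-3 and 0-1 for a total weight of 19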
| 323
| 0
|
class OverFlowError(Exception):
    pass
class UnderFlowError(Exception):
    pass
class FixedPriorityQueue:
    def __init__(self):
        self.queues: list[list[int]] = [
            [],
            [],
            [],
        ]
    def enqueue(self, priority: int, data: int):
        try:
            if len(self.queues[priority]) >= 100:
                raise OverflowError('Maximum queue size is 100')
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError('Valid priorities are 0, 1, and 2')
    def dequeue(self):
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError('All queues are empty')
    def __str__(self):
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))
class ElementPriorityQueue:
    def __init__(self):
        self.queue: list[int] = []
    def enqueue(self, data: int):
        if len(self.queue) == 100:
            raise OverFlowError('Maximum queue size is 100')
        self.queue.append(data)
    def dequeue(self):
        if not self.queue:
            raise UnderFlowError('The queue is empty')
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data
    def __str__(self):
        return str(self.queue)
def fixed_priority_queue() -> None:
    '''simple docstring'''
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
def element_priority_queue() -> None:
    '''simple docstring'''
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
| 354
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 323
| 0
|
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
__A : Optional[int] = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'''\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18  \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'''
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, '''sentencepiece_model_pb2''', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b'H\003'
    # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
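# The _serialized_start/_serialized_end offsets above index into DESCRIPTOR's
# serialized_pb, marking where each message and enum definition lives inside the
# serialized FileDescriptorProto.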
# @@protoc_insertion_point(module_scope)
| 355
|
import math
def solution(n: int = 100 ) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first ``n`` natural numbers (Project Euler problem 6)."""
    sum_of_squares = sum(i * i for i in range(1, n + 1 ) )
    square_of_sum = int(math.pow(sum(range(1, n + 1 ) ), 2 ) )
    return square_of_sum - sum_of_squares
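# Worked example: for n = 10 the sum of squares is 385 and the square of the sum
# is 55**2 = 3025, so the function returns 2640.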
if __name__ == "__main__":
print(F'{solution() = }')
| 323
| 0
|
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
TEST_UNET_CONFIG = {
'sample_size': 32,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': 1000,
'block_out_channels': [32, 64],
'attention_head_dim': 8,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
IMAGENET_64_UNET_CONFIG = {
'sample_size': 64,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 3,
'num_class_embeds': 1000,
'block_out_channels': [192, 192 * 2, 192 * 3, 192 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
LSUN_256_UNET_CONFIG = {
'sample_size': 256,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': None,
'block_out_channels': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'default',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
CD_SCHEDULER_CONFIG = {
    'num_train_timesteps': 40,
    'sigma_min': 0.002,
    'sigma_max': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
    'num_train_timesteps': 201,
    'sigma_min': 0.002,
    'sigma_max': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
    'num_train_timesteps': 151,
    'sigma_min': 0.002,
    'sigma_max': 80.0,
}
def strabool(v ) -> Dict:
    '''simple docstring'''
    if isinstance(v, bool ):
        return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError('boolean value expected' )
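# Usage sketch (the __main__ block below calls this helper on the --class_cond flag):
# strabool("yes") -> True, strabool("0") -> False, and a value that is already a
# bool is returned unchanged.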
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False ) -> Dict:
'''simple docstring'''
lowerCAmelCase : Dict = checkpoint[f"{old_prefix}.in_layers.0.weight"]
lowerCAmelCase : Dict = checkpoint[f"{old_prefix}.in_layers.0.bias"]
lowerCAmelCase : Optional[int] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
lowerCAmelCase : Union[str, Any] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
lowerCAmelCase : Optional[Any] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
lowerCAmelCase : List[Any] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
lowerCAmelCase : Optional[Any] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
lowerCAmelCase : int = checkpoint[f"{old_prefix}.out_layers.0.bias"]
lowerCAmelCase : Optional[Any] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
lowerCAmelCase : Dict = checkpoint[f"{old_prefix}.out_layers.3.bias"]
if has_skip:
lowerCAmelCase : str = checkpoint[f"{old_prefix}.skip_connection.weight"]
lowerCAmelCase : Dict = checkpoint[f"{old_prefix}.skip_connection.bias"]
return new_checkpoint
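# Each line above reads one consistency-models ResNet block tensor
# (in_layers / emb_layers / out_layers / skip_connection); in the upstream
# conversion script each is written into new_checkpoint under the matching
# diffusers ResnetBlock2D key (norm1/conv1/time_emb_proj/norm2/conv2/conv_shortcut).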
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None ) -> Tuple:
'''simple docstring'''
lowerCAmelCase : Optional[int] = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0 )
lowerCAmelCase : List[str] = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0 )
lowerCAmelCase : Dict = checkpoint[f"{old_prefix}.norm.weight"]
lowerCAmelCase : Tuple = checkpoint[f"{old_prefix}.norm.bias"]
lowerCAmelCase : Union[str, Any] = weight_q.squeeze(-1 ).squeeze(-1 )
lowerCAmelCase : Union[str, Any] = bias_q.squeeze(-1 ).squeeze(-1 )
lowerCAmelCase : int = weight_k.squeeze(-1 ).squeeze(-1 )
lowerCAmelCase : Any = bias_k.squeeze(-1 ).squeeze(-1 )
lowerCAmelCase : int = weight_v.squeeze(-1 ).squeeze(-1 )
lowerCAmelCase : int = bias_v.squeeze(-1 ).squeeze(-1 )
lowerCAmelCase : Tuple = (
checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1 ).squeeze(-1 )
)
lowerCAmelCase : Dict = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
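# The fused qkv tensor is chunked into three equal parts along dim 0 (query, key,
# value), and squeeze(-1).squeeze(-1) drops the 1x1-conv spatial dims so the
# weights fit the linear attention projections used by diffusers.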
def con_pt_to_diffuser(checkpoint_path, unet_config ) -> Any:
    '''simple docstring'''
    checkpoint = torch.load(checkpoint_path, map_location='cpu' )
    new_checkpoint = {}
lowerCAmelCase : Dict = checkpoint['''time_embed.0.weight''']
lowerCAmelCase : Optional[int] = checkpoint['''time_embed.0.bias''']
lowerCAmelCase : str = checkpoint['''time_embed.2.weight''']
lowerCAmelCase : Dict = checkpoint['''time_embed.2.bias''']
if unet_config["num_class_embeds"] is not None:
lowerCAmelCase : List[Any] = checkpoint['''label_emb.weight''']
lowerCAmelCase : Any = checkpoint['''input_blocks.0.0.weight''']
lowerCAmelCase : Dict = checkpoint['''input_blocks.0.0.bias''']
    down_block_types = unet_config['''down_block_types''']
    layers_per_block = unet_config['''layers_per_block''']
    attention_head_dim = unet_config['''attention_head_dim''']
    channels_list = unet_config['''block_out_channels''']
    current_layer = 1
    prev_channels = channels_list[0]
    for i, layer_type in enumerate(down_block_types ):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block ):
lowerCAmelCase : List[str] = f"down_blocks.{i}.resnets.{j}"
lowerCAmelCase : int = f"input_blocks.{current_layer}.0"
lowerCAmelCase : List[str] = True if j == 0 and downsample_block_has_skip else False
lowerCAmelCase : Tuple = convert_resnet(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, has_skip=lowerCamelCase_ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block ):
lowerCAmelCase : List[str] = f"down_blocks.{i}.resnets.{j}"
lowerCAmelCase : List[str] = f"input_blocks.{current_layer}.0"
lowerCAmelCase : Tuple = True if j == 0 and downsample_block_has_skip else False
lowerCAmelCase : Optional[Any] = convert_resnet(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, has_skip=lowerCamelCase_ )
lowerCAmelCase : int = f"down_blocks.{i}.attentions.{j}"
lowerCAmelCase : List[Any] = f"input_blocks.{current_layer}.1"
lowerCAmelCase : List[Any] = convert_attention(
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
current_layer += 1
            if i != len(down_block_types ) - 1:
lowerCAmelCase : List[Any] = f"down_blocks.{i}.downsamplers.0"
lowerCAmelCase : List[Any] = f"input_blocks.{current_layer}.0"
lowerCAmelCase : Tuple = convert_resnet(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
current_layer += 1
        prev_channels = current_channels
# hardcoded the mid-block for now
lowerCAmelCase : Optional[Any] = '''mid_block.resnets.0'''
lowerCAmelCase : List[str] = '''middle_block.0'''
lowerCAmelCase : Optional[int] = convert_resnet(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
lowerCAmelCase : Dict = '''mid_block.attentions.0'''
lowerCAmelCase : List[Any] = '''middle_block.1'''
lowerCAmelCase : Dict = convert_attention(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
lowerCAmelCase : List[Any] = '''mid_block.resnets.1'''
lowerCAmelCase : Tuple = '''middle_block.2'''
lowerCAmelCase : Any = convert_resnet(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
    current_layer = 0
    up_block_types = unet_config['''up_block_types''']
    for i, layer_type in enumerate(up_block_types ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
lowerCAmelCase : Any = f"up_blocks.{i}.resnets.{j}"
lowerCAmelCase : Dict = f"output_blocks.{current_layer}.0"
lowerCAmelCase : Dict = convert_resnet(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, has_skip=lowerCamelCase_ )
current_layer += 1
            if i != len(up_block_types ) - 1:
lowerCAmelCase : int = f"up_blocks.{i}.upsamplers.0"
lowerCAmelCase : Union[str, Any] = f"output_blocks.{current_layer-1}.1"
lowerCAmelCase : Union[str, Any] = convert_resnet(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
lowerCAmelCase : Optional[Any] = f"up_blocks.{i}.resnets.{j}"
lowerCAmelCase : str = f"output_blocks.{current_layer}.0"
lowerCAmelCase : List[Any] = convert_resnet(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, has_skip=lowerCamelCase_ )
lowerCAmelCase : Optional[Any] = f"up_blocks.{i}.attentions.{j}"
lowerCAmelCase : Optional[Any] = f"output_blocks.{current_layer}.1"
lowerCAmelCase : str = convert_attention(
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
current_layer += 1
            if i != len(up_block_types ) - 1:
lowerCAmelCase : Optional[Any] = f"up_blocks.{i}.upsamplers.0"
lowerCAmelCase : List[str] = f"output_blocks.{current_layer-1}.2"
lowerCAmelCase : Dict = convert_resnet(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
lowerCAmelCase : int = checkpoint['''out.0.weight''']
lowerCAmelCase : Optional[Any] = checkpoint['''out.0.bias''']
lowerCAmelCase : int = checkpoint['''out.2.weight''']
lowerCAmelCase : List[Any] = checkpoint['''out.2.bias''']
return new_checkpoint
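# Overall flow: the flat OpenAI-style state dict (input_blocks.* / middle_block.*
# / output_blocks.*) is walked in the same order as diffusers' down_blocks /
# mid_block / up_blocks, with current_layer keeping the two numbering schemes aligned.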
if __name__ == "__main__":
__A : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
print(F'Checkpoint: {ckpt_name}')
# Get U-Net config
if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
else:
raise ValueError(F'Checkpoint type {ckpt_name} is not currently supported.')
if not args.class_cond:
        unet_config['num_class_embeds'] = None
    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'Checkpoint type {ckpt_name} is not currently supported.')
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 356
|
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float ) -> float:
    """Evaluate poly[0] + poly[1]*x + ... + poly[n]*x**n term by term."""
    return sum(c * (x**i) for i, c in enumerate(poly ) )
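# Worked example: for poly = (0.0, 0.0, 5.0, 9.3, 7.0) and x = 10.0 this returns
# 5 * 10**2 + 9.3 * 10**3 + 7 * 10**4 = 79800.0, matching the __main__ demo below.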
def horner(poly: Sequence[float], x: float ) -> float:
    """Evaluate the same polynomial with Horner's rule."""
    result = 0.0
    for coeff in reversed(poly ):
        result = result * x + coeff
    return result
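# Horner's rule rewrites c0 + c1*x + ... + cn*x**n as (...(cn*x + c(n-1))*x + ...)*x + c0,
# so evaluation needs only n multiplications and n additions; both functions above
# return the same value for the same inputs.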
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 323
| 0
|
def SCREAMING_SNAKE_CASE__ ( input_string: str ) -> str:
    """Return the longest palindromic substring of ``input_string`` (Manacher's algorithm)."""
    max_length = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ''
    output_string = ''
    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string ) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]
    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741
    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string ) )]
    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string ) ):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1 )
        while (
            j - k >= 0
            and j + k < len(new_input_string )
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
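# Manacher's algorithm runs in O(n): the "|" separators make every palindrome
# odd-length, and length[l + r - j] reuses the mirror of j inside the
# furthest-reaching palindrome [l, r], so each position is expanded over at most once.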
if __name__ == "__main__":
import doctest
doctest.testmod()
| 357
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase ):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {'shortest_edge': 20}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
    def prepare_image_processor_dict(self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __A ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None
    def setUp(self ):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self )

    @property
    def image_processor_dict(self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self ):
lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ , 'do_resize' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'size' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'do_center_crop' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'crop_size' ) )
    def test_image_processor_from_dict_with_kwargs(self ):
lowerCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
lowerCAmelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
    def test_batch_feature(self ):
pass
    def test_call_pil(self ):
# Initialize image_processing
lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image )
# Test not batched input
lowerCAmelCase : str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase : Dict = image_processing(UpperCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
    def test_call_numpy(self ):
# Initialize image_processing
lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
# Test not batched input
lowerCAmelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase : Optional[int] = image_processing(UpperCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
    def test_call_pytorch(self ):
# Initialize image_processing
lowerCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
# Test not batched input
lowerCAmelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase : List[str] = image_processing(UpperCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 323
| 0
|
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None ):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self ):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    def __init__(self, collection: Sequence, function ):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection ) - 1 )

    def update(self, i, val ):
        self._update_tree(self.root, i, val )

    def query_range(self, i, j ):
        return self._query_range(self.root, i, j )

    def _build_tree(self, start, end ):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start] )
        mid = (start + end) // 2
        left = self._build_tree(start, mid )
        right = self._build_tree(mid + 1, end )
        return SegmentTreeNode(start, end, self.fn(left.val, right.val ), left, right )

    def _update_tree(self, node, i, val ):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val )
        else:
            self._update_tree(node.right, i, val )
        node.val = self.fn(node.left.val, node.right.val )

    def _query_range(self, node, i, j ):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j )
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid ), self._query_range(node.right, node.mid + 1, j ), )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j )

    def traverse(self ):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root )
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left )
                if node.right is not None:
                    queue.put(node.right )
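# Complexity sketch: _build_tree is O(n); update() and query_range() are O(log n)
# per call; traverse() yields the nodes in breadth-first order.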
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 358
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False ) -> Optional[int]:
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('module.cls_token', 'vit.embeddings.cls_token'),
('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('module.pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('module.norm.weight', 'layernorm.weight'),
('module.norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
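# All of these are one-to-one renames; the fused qkv projection cannot be handled
# this way and is split into separate query/key/value tensors by read_in_q_k_v below.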
def read_in_q_k_v(state_dict, config, base_model=False ) -> Dict:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
lowerCAmelCase : Optional[Any] = ''
else:
lowerCAmelCase : Optional[Any] = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase : Union[str, Any] = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight" )
lowerCAmelCase : List[Any] = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase : str = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase : int = in_proj_bias[: config.hidden_size]
lowerCAmelCase : Dict = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase : Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase : str = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase : Any = in_proj_bias[-config.hidden_size :]
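# The timm/MSN checkpoint stores query, key and value as one fused in_proj matrix;
# the three row slices above ([:h], [h:2h], [2h:3h] with h = config.hidden_size)
# become the separate q/k/v projections expected by the HF ViT attention layers.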
def remove_classification_head_(state_dict ) -> Any:
    '''simple docstring'''
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None )
def remove_projection_head(state_dict ) -> str:
    '''simple docstring'''
    ignore_keys = [
'module.fc.fc1.weight',
'module.fc.fc1.bias',
'module.fc.bn1.weight',
'module.fc.bn1.bias',
'module.fc.bn1.running_mean',
'module.fc.bn1.running_var',
'module.fc.bn1.num_batches_tracked',
'module.fc.fc2.weight',
'module.fc.fc2.bias',
'module.fc.bn2.weight',
'module.fc.bn2.bias',
'module.fc.bn2.running_mean',
'module.fc.bn2.running_var',
'module.fc.bn2.num_batches_tracked',
'module.fc.fc3.weight',
'module.fc.fc3.bias',
]
for k in ignore_keys:
        state_dict.pop(k, None )
def rename_key(dct, old, new ) -> List[str]:
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path ) -> Tuple:
'''simple docstring'''
    config = ViTMSNConfig()
    config.num_labels = 1_000
    repo_id = 'datasets/huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename ), 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu' )['target_encoder']
    image_processor = ViTImageProcessor(size=config.image_size )
    remove_projection_head(state_dict )
    rename_keys = create_rename_keys(config, base_model=True )
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest )
    read_in_q_k_v(state_dict, config, base_model=True )
    model.load_state_dict(state_dict )
    model.eval()
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True ).raw )
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD )
    inputs = image_processor(images=image, return_tensors='pt' )
    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    last_hidden_state = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowerCAmelCase : Optional[int] = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
elif "b16" in checkpoint_url:
lowerCAmelCase : int = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
elif "l16" in checkpoint_url:
lowerCAmelCase : Union[str, Any] = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
elif "b4" in checkpoint_url:
lowerCAmelCase : int = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
else:
lowerCAmelCase : Union[str, Any] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3], _UpperCAmelCase, atol=1e-4 )
print(f"Saving model to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__A : List[str] = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 323
| 0
|
"""simple docstring"""
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None ) -> int:
    """Return the maximum sum over all (not necessarily contiguous) subsequences of ``nums``."""
    if nums is None or not nums:
        raise ValueError('Input sequence should not be empty' )
    ans = nums[0]
    for i in range(1, len(nums ) ):
        num = nums[i]
        ans = max(ans, ans + num, num )
    return ans
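# Note this maximises over subsequences (any in-order subset), not contiguous
# subarrays: max_subsequence_sum([1, 2, 3, 4, -2]) == 10 (sum of the positives),
# and for an all-negative input the single largest element is returned.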
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input('''Enter number of elements : ''').strip())
    array = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
print(max_subsequence_sum(array))
| 359
|
def SCREAMING_SNAKE_CASE__ ( string1: str, string2: str ) -> int:
    """Return the Hamming distance between two equal-length strings."""
    if len(string1 ) != len(string2 ):
        raise ValueError('String lengths must match!' )
    count = 0
    for char1, char2 in zip(string1, string2 ):
        if char1 != char2:
            count += 1
    return count
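# e.g. "karolin" and "kathrin" differ at 3 positions, so their Hamming distance is 3.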
if __name__ == "__main__":
import doctest
doctest.testmod()
| 323
| 0
|
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( ) -> int:
'''simple docstring'''
lowerCAmelCase : List[Any] = 10
lowerCAmelCase : str = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
lowerCAmelCase : Union[str, Any] = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
'id': list(range(_UpperCAmelCase ) ),
}, features=_UpperCAmelCase, )
return dataset
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> Any:
'''simple docstring'''
lowerCAmelCase : str = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=_UpperCAmelCase )
return filename
# FILE_CONTENT + files
__A : str = '''\\n Text data.\n Second line of data.'''
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Any:
'''simple docstring'''
lowerCAmelCase : List[str] = tmp_path_factory.mktemp('data' ) / 'file.txt'
lowerCAmelCase : List[str] = FILE_CONTENT
with open(_UpperCAmelCase, 'w' ) as f:
f.write(_UpperCAmelCase )
return filename
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Dict:
'''simple docstring'''
import bza
lowerCAmelCase : str = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
lowerCAmelCase : Any = bytes(_UpperCAmelCase, 'utf-8' )
with bza.open(_UpperCAmelCase, 'wb' ) as f:
f.write(_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Dict:
'''simple docstring'''
import gzip
lowerCAmelCase : List[Any] = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
lowerCAmelCase : Union[str, Any] = bytes(_UpperCAmelCase, 'utf-8' )
with gzip.open(_UpperCAmelCase, 'wb' ) as f:
f.write(_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
import lza.frame
lowerCAmelCase : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
lowerCAmelCase : Tuple = bytes(_UpperCAmelCase, 'utf-8' )
with lza.frame.open(_UpperCAmelCase, 'wb' ) as f:
f.write(_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> int:
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
lowerCAmelCase : int = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
with pyazr.SevenZipFile(_UpperCAmelCase, 'w' ) as archive:
archive.write(_UpperCAmelCase, arcname=os.path.basename(_UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
import tarfile
lowerCAmelCase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(_UpperCAmelCase, 'w' ) as f:
f.add(_UpperCAmelCase, arcname=os.path.basename(_UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
import lzma
lowerCAmelCase : List[str] = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
lowerCAmelCase : Optional[int] = bytes(_UpperCAmelCase, 'utf-8' )
with lzma.open(_UpperCAmelCase, 'wb' ) as f:
f.write(_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
import zipfile
lowerCAmelCase : List[str] = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(_UpperCAmelCase, 'w' ) as f:
f.write(_UpperCAmelCase, arcname=os.path.basename(_UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> str:
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
lowerCAmelCase : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
lowerCAmelCase : Optional[int] = bytes(_UpperCAmelCase, 'utf-8' )
with zstd.open(_UpperCAmelCase, 'wb' ) as f:
f.write(_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase : str = tmp_path_factory.mktemp('data' ) / 'file.xml'
lowerCAmelCase : Tuple = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(_UpperCAmelCase, 'w' ) as f:
f.write(_UpperCAmelCase )
return filename
__A : Tuple = [
{'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0},
]
__A : Dict = [
{'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0},
{'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0},
]
__A : Dict = {
'''col_1''': ['''0''', '''1''', '''2''', '''3'''],
'''col_2''': [0, 1, 2, 3],
'''col_3''': [0.0, 1.0, 2.0, 3.0],
}
__A : Optional[Any] = [
{'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0},
{'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1},
]
__A : Optional[Any] = [
{'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0},
]
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]:
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> str:
'''simple docstring'''
lowerCAmelCase : Optional[int] = datasets.Dataset.from_dict(_UpperCAmelCase )
lowerCAmelCase : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase : str = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
with contextlib.closing(sqlitea.connect(_UpperCAmelCase ) ) as con:
lowerCAmelCase : str = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)', tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> str:
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(_UpperCAmelCase, 'w', newline='' ) as f:
lowerCAmelCase : List[str] = csv.DictWriter(_UpperCAmelCase, fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Tuple:
'''simple docstring'''
lowerCAmelCase : int = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(_UpperCAmelCase, 'w', newline='' ) as f:
lowerCAmelCase : int = csv.DictWriter(_UpperCAmelCase, fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> str:
'''simple docstring'''
import bza
lowerCAmelCase : List[str] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(_UpperCAmelCase, 'rb' ) as f:
lowerCAmelCase : int = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(_UpperCAmelCase, 'wb' ) as f:
f.write(_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> Dict:
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(_UpperCAmelCase, 'w' ) as f:
f.write(_UpperCAmelCase, arcname=os.path.basename(_UpperCAmelCase ) )
f.write(_UpperCAmelCase, arcname=os.path.basename(_UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase : List[str] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(_UpperCAmelCase, 'w' ) as f:
f.write(_UpperCAmelCase, arcname=os.path.basename(csv_path.replace('.csv', '.CSV' ) ) )
f.write(_UpperCAmelCase, arcname=os.path.basename(csva_path.replace('.csv', '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> int:
'''simple docstring'''
lowerCAmelCase : List[str] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(_UpperCAmelCase, 'w' ) as f:
f.write(_UpperCAmelCase, arcname=os.path.join('main_dir', os.path.basename(_UpperCAmelCase ) ) )
f.write(_UpperCAmelCase, arcname=os.path.join('main_dir', os.path.basename(_UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> int:
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
lowerCAmelCase : Union[str, Any] = pa.schema(
{
'col_1': pa.string(),
'col_2': pa.intaa(),
'col_3': pa.floataa(),
} )
with open(_UpperCAmelCase, 'wb' ) as f:
lowerCAmelCase : str = pq.ParquetWriter(_UpperCAmelCase, schema=_UpperCAmelCase )
lowerCAmelCase : List[str] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(_UpperCAmelCase ) )] for k in DATA[0]}, schema=_UpperCAmelCase )
writer.write_table(_UpperCAmelCase )
writer.close()
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowerCAmelCase : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
lowerCAmelCase : Union[str, Any] = {'data': DATA}
with open(_UpperCAmelCase, 'w' ) as f:
json.dump(_UpperCAmelCase, _UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
lowerCAmelCase : Optional[int] = {'data': DATA_DICT_OF_LISTS}
with open(_UpperCAmelCase, 'w' ) as f:
json.dump(_UpperCAmelCase, _UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> str:
'''simple docstring'''
lowerCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(_UpperCAmelCase, 'w' ) as f:
for item in DATA:
f.write(json.dumps(_UpperCAmelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Tuple:
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(_UpperCAmelCase, 'w' ) as f:
for item in DATA:
f.write(json.dumps(_UpperCAmelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(_UpperCAmelCase, 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(_UpperCAmelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(_UpperCAmelCase, 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(_UpperCAmelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> str:
'''simple docstring'''
import gzip
lowerCAmelCase : List[str] = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(_UpperCAmelCase, 'rb' ) as orig_file:
with gzip.open(_UpperCAmelCase, 'wb' ) as zipped_file:
zipped_file.writelines(_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
import gzip
lowerCAmelCase : List[str] = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(_UpperCAmelCase, 'rb' ) as orig_file:
with gzip.open(_UpperCAmelCase, 'wb' ) as zipped_file:
zipped_file.writelines(_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowerCAmelCase : Tuple = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(_UpperCAmelCase, 'w' ) as f:
f.write(_UpperCAmelCase, arcname=os.path.basename(_UpperCAmelCase ) )
f.write(_UpperCAmelCase, arcname=os.path.basename(_UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase : str = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(_UpperCAmelCase, 'w' ) as f:
f.write(_UpperCAmelCase, arcname=os.path.join('nested', os.path.basename(_UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase : int = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(_UpperCAmelCase, 'w' ) as f:
f.write(_UpperCAmelCase, arcname=os.path.join('main_dir', os.path.basename(_UpperCAmelCase ) ) )
f.write(_UpperCAmelCase, arcname=os.path.join('main_dir', os.path.basename(_UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> Tuple:
'''simple docstring'''
lowerCAmelCase : Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(_UpperCAmelCase, 'w' ) as f:
f.add(_UpperCAmelCase, arcname=os.path.basename(_UpperCAmelCase ) )
f.add(_UpperCAmelCase, arcname=os.path.basename(_UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase : Dict = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(_UpperCAmelCase, 'w' ) as f:
f.add(_UpperCAmelCase, arcname=os.path.join('nested', os.path.basename(_UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Any:
'''simple docstring'''
lowerCAmelCase : Any = ['0', '1', '2', '3']
lowerCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(_UpperCAmelCase, 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase : Dict = ['0', '1', '2', '3']
lowerCAmelCase : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(_UpperCAmelCase, 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowerCAmelCase : Any = ['0', '1', '2', '3']
lowerCAmelCase : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(_UpperCAmelCase, 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> Any:
'''simple docstring'''
lowerCAmelCase : str = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(_UpperCAmelCase, 'w' ) as f:
f.write(_UpperCAmelCase, arcname=os.path.basename(_UpperCAmelCase ) )
f.write(_UpperCAmelCase, arcname=os.path.basename(_UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowerCAmelCase : str = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(_UpperCAmelCase, 'w' ) as f:
f.write(_UpperCAmelCase, arcname=os.path.join('main_dir', os.path.basename(_UpperCAmelCase ) ) )
f.write(_UpperCAmelCase, arcname=os.path.join('main_dir', os.path.basename(_UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(_UpperCAmelCase, 'w' ) as f:
f.write(_UpperCAmelCase, arcname=os.path.basename('unsupported.ext' ) )
f.write(_UpperCAmelCase, arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Any:
'''simple docstring'''
lowerCAmelCase : List[str] = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
lowerCAmelCase : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(_UpperCAmelCase, 'w', encoding='utf-8' ) as f:
f.write(_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( ) -> int:
'''simple docstring'''
return os.path.join('tests', 'features', 'data', 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
'''simple docstring'''
return os.path.join('tests', 'features', 'data', 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase : Tuple = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(_UpperCAmelCase, 'w' ) as f:
f.write(_UpperCAmelCase, arcname=os.path.basename(_UpperCAmelCase ) )
f.write(_UpperCAmelCase, arcname=os.path.basename(_UpperCAmelCase ).replace('.jpg', '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> int:
'''simple docstring'''
lowerCAmelCase : int = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt', 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt', 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt', 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt', 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt', 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
| 360
|
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__A : List[Any] = trt.Logger(trt.Logger.WARNING)
__A : Optional[Any] = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__A : List[Any] = logging.getLogger(__name__)
__A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=384,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=128,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
    '''--preprocessing_num_workers''', type=int, default=4, help='''Number of processes to use for the preprocessing.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
__A : List[str] = parser.parse_args()
if args.tokenizer_name:
__A : List[Any] = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
        '''You are instantiating a new tokenizer from scratch. This is not supported by this script. '''
        '''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
__A : List[Any] = args.per_device_eval_batch_size
__A : Any = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__A : Any = True
__A : Union[str, Any] = '''temp_engine/bert-fp32.engine'''
if args.fp16:
    __A : List[str] = '''temp_engine/bert-fp16.engine'''
if args.int8:
    __A : Dict = '''temp_engine/bert-int8.engine'''
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
__A : Optional[int] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__A : str = [network.get_input(i) for i in range(network.num_inputs)]
__A : Any = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__A : Dict = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
__A : List[Any] = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__A : Union[str, Any] = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> Any:
'''simple docstring'''
    lowerCAmelCase : Dict = np.asarray(inputs['input_ids'], dtype=np.int32 )
    lowerCAmelCase : Optional[int] = np.asarray(inputs['attention_mask'], dtype=np.int32 )
    lowerCAmelCase : Dict = np.asarray(inputs['token_type_ids'], dtype=np.int32 )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), _UpperCAmelCase )
cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), _UpperCAmelCase )
cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), _UpperCAmelCase )
# start time
lowerCAmelCase : List[Any] = time.time()
# Run inference
context.execute_async(
        bindings=[int(d_inp ) for d_inp in d_inputs] + [int(_UpperCAmelCase ), int(_UpperCAmelCase )], stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
cuda.memcpy_dtoh_async(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
# Synchronize the stream and take time
stream.synchronize()
# end time
lowerCAmelCase : List[str] = time.time()
lowerCAmelCase : Tuple = end_time - start_time
    lowerCAmelCase : Union[str, Any] = (h_output0, h_output1)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__A : List[str] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__A : Union[str, Any] = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
__A : int = raw_datasets['''validation'''].column_names
__A : int = '''question''' if '''question''' in column_names else column_names[0]
__A : List[str] = '''context''' if '''context''' in column_names else column_names[1]
__A : int = '''answers''' if '''answers''' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__A : str = tokenizer.padding_side == '''right'''
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'
)
__A : Union[str, Any] = min(args.max_seq_length, tokenizer.model_max_length)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Tuple:
'''simple docstring'''
lowerCAmelCase : Any = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
lowerCAmelCase : Union[str, Any] = tokenizer(
examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation='only_second' if pad_on_right else 'only_first', max_length=_UpperCAmelCase, stride=args.doc_stride, return_overflowing_tokens=_UpperCAmelCase, return_offsets_mapping=_UpperCAmelCase, padding='max_length', )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
lowerCAmelCase : List[Any] = tokenized_examples.pop('overflow_to_sample_mapping' )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
lowerCAmelCase : Tuple = []
for i in range(len(tokenized_examples['input_ids'] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
lowerCAmelCase : Optional[Any] = tokenized_examples.sequence_ids(_UpperCAmelCase )
lowerCAmelCase : Optional[int] = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
lowerCAmelCase : List[str] = sample_mapping[i]
tokenized_examples["example_id"].append(examples['id'][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
lowerCAmelCase : List[Any] = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples['offset_mapping'][i] )
]
return tokenized_examples
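# Note (hypothetical values) on prepare_validation_features' output: a 3-token
# question over a context long enough to split into two features would give
# overflow_to_sample_mapping == [0, 0], and each feature's offset_mapping keeps
# (start, end) character spans only for context tokens, e.g.
# [None, None, None, (0, 5), (6, 11), ...].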
__A : int = raw_datasets['''validation''']
# Validation Feature Creation
__A : Any = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='''Running tokenizer on validation dataset''',
)
__A : List[str] = default_data_collator
__A : int = eval_dataset.remove_columns(['''example_id''', '''offset_mapping'''])
__A : Union[str, Any] = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase="eval" ) -> int:
'''simple docstring'''
lowerCAmelCase : str = postprocess_qa_predictions(
examples=_UpperCAmelCase, features=_UpperCAmelCase, predictions=_UpperCAmelCase, version_2_with_negative=args.version_2_with_negative, n_best_size=args.n_best_size, max_answer_length=args.max_answer_length, null_score_diff_threshold=args.null_score_diff_threshold, output_dir=args.output_dir, prefix=_UpperCAmelCase, )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
lowerCAmelCase : Union[str, Any] = [
{'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
]
else:
lowerCAmelCase : List[Any] = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]
lowerCAmelCase : Optional[Any] = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=_UpperCAmelCase, label_ids=_UpperCAmelCase )
__A : List[Any] = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
return trt.volume(engine.get_binding_shape(_UpperCAmelCase ) ) * engine.get_binding_dtype(_UpperCAmelCase ).itemsize
# Allocate device memory for inputs and outputs.
__A : List[str] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
    __A : Optional[int] = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    __A : int = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    __A : Tuple = cuda.mem_alloc(h_output0.nbytes)
    __A : Tuple = cuda.mem_alloc(h_output1.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__A : Union[str, Any] = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(F' Num examples = {len(eval_dataset)}')
logger.info(F' Batch size = {args.per_device_eval_batch_size}')
__A : Union[str, Any] = 0.0
__A : Optional[Any] = 0
__A : Optional[Any] = timeit.default_timer()
__A : Optional[int] = None
for step, batch in enumerate(eval_dataloader):
        __A , __A : str = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
total_time += infer_time
niter += 1
__A , __A : str = outputs
__A : Optional[Any] = torch.tensor(start_logits)
__A : Any = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
__A : List[Any] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
__A : int = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
__A : Union[str, Any] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__A : int = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
__A : str = nested_truncate(all_preds, len(eval_dataset))
__A : Any = timeit.default_timer() - start_time
logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 1000 / niter))
logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 1000))
logger.info('''Total Number of Inference = %d''', niter)
__A : List[Any] = post_processing_function(eval_examples, eval_dataset, all_preds)
__A : str = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F'Evaluation metrics: {eval_metric}')
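# A standalone sketch (simplified, not the real nested_concat/nested_truncate
# helpers used above): per-batch logits of different sequence lengths are padded
# with a padding index and stacked, then truncated back to the dataset size.
import numpy as np

def pad_and_concat(a, b, padding_index=-100):
    # Pad the narrower batch along the sequence axis, then stack along the batch axis.
    width = max(a.shape[1], b.shape[1])
    out = np.full((a.shape[0] + b.shape[0], width), padding_index, dtype=a.dtype)
    out[: a.shape[0], : a.shape[1]] = a
    out[a.shape[0] :, : b.shape[1]] = b
    return out

batch_a = np.zeros((2, 3))
batch_b = np.zeros((2, 5))
assert pad_and_concat(batch_a, batch_b).shape == (4, 5)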
| 323
| 0
|
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class __A ( unittest.TestCase ):
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Any = '''hf-internal-testing/tiny-random-t5'''
lowerCAmelCase : str = AutoTokenizer.from_pretrained(__A )
        lowerCAmelCase : Optional[int] = AutoModelForSeq2SeqLM.from_pretrained(__A )
lowerCAmelCase : List[Any] = tokenizer('This is me' , return_tensors='pt' )
lowerCAmelCase : Optional[int] = model.to_bettertransformer()
self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
lowerCAmelCase : List[str] = model.generate(**__A )
lowerCAmelCase : Optional[Any] = model.reverse_bettertransformer()
self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A )
            lowerCAmelCase : List[Any] = AutoModelForSeq2SeqLM.from_pretrained(__A )
self.assertFalse(
any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
lowerCAmelCase : Optional[int] = model_reloaded.generate(**__A )
self.assertTrue(torch.allclose(__A , __A ) )
def lowercase__ ( self : Any ):
lowerCAmelCase : List[Any] = '''hf-internal-testing/tiny-random-t5'''
        lowerCAmelCase : List[str] = AutoModelForSeq2SeqLM.from_pretrained(__A )
lowerCAmelCase : int = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(__A ):
model.save_pretrained(__A )
lowerCAmelCase : Tuple = model.reverse_bettertransformer()
model.save_pretrained(__A )
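# A minimal usage sketch of the round trip the tests above exercise (requires
# the optimum package; the save path is a placeholder): convert, revert, save.
from transformers import AutoModelForSeq2SeqLM

model = AutoModelForSeq2SeqLM.from_pretrained('hf-internal-testing/tiny-random-t5')
model = model.to_bettertransformer()        # swap in BetterTransformer modules
model = model.reverse_bettertransformer()   # restore vanilla modules
model.save_pretrained('/tmp/t5-roundtrip')  # saving only succeeds after reverting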
| 361
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A : Any = logging.get_logger(__name__)
__A : Union[str, Any] = {
'''shi-labs/dinat-mini-in1k-224''': '''https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json''',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class __A ( lowerCAmelCase , lowerCAmelCase ):
lowerCAmelCase_ : Optional[Any] = "dinat"
lowerCAmelCase_ : Dict = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]=4 , UpperCAmelCase_ : Tuple=3 , UpperCAmelCase_ : Optional[Any]=64 , UpperCAmelCase_ : List[Any]=[3, 4, 6, 5] , UpperCAmelCase_ : Dict=[2, 4, 8, 16] , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : Dict=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , UpperCAmelCase_ : int=3.0 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : List[Any]=0.02 , UpperCAmelCase_ : List[str]=1E-5 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Optional[int]=None , **UpperCAmelCase_ : Union[str, Any] , ):
super().__init__(**UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = patch_size
lowerCAmelCase : Optional[Any] = num_channels
lowerCAmelCase : str = embed_dim
lowerCAmelCase : Any = depths
lowerCAmelCase : List[Any] = len(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = num_heads
lowerCAmelCase : Tuple = kernel_size
lowerCAmelCase : List[str] = dilations
lowerCAmelCase : Any = mlp_ratio
lowerCAmelCase : Optional[int] = qkv_bias
lowerCAmelCase : int = hidden_dropout_prob
lowerCAmelCase : str = attention_probs_dropout_prob
lowerCAmelCase : Union[str, Any] = drop_path_rate
lowerCAmelCase : Any = hidden_act
lowerCAmelCase : Union[str, Any] = layer_norm_eps
lowerCAmelCase : Optional[int] = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase : Union[str, Any] = int(embed_dim * 2 ** (len(UpperCAmelCase_ ) - 1) )
lowerCAmelCase : int = layer_scale_init_value
lowerCAmelCase : Optional[Any] = ['stem'] + [f"stage{idx}" for idx in range(1 , len(UpperCAmelCase_ ) + 1 )]
lowerCAmelCase , lowerCAmelCase : Tuple = get_aligned_output_features_output_indices(
out_features=UpperCAmelCase_ , out_indices=UpperCAmelCase_ , stage_names=self.stage_names )
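# A minimal usage sketch (values assumed; the masked class above corresponds to
# transformers' DinatConfig): hidden_size is derived from embed_dim and the
# number of stages, as in the __init__ above.
from transformers import DinatConfig

config = DinatConfig(embed_dim=64, depths=[3, 4, 6, 5])
assert config.hidden_size == 64 * 2 ** 3  # 512 after the fourth stage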
| 323
| 0
|
def SCREAMING_SNAKE_CASE__ ( num ):
    '''simple docstring'''
    if isinstance(num, float ):
        raise TypeError('\'float\' object cannot be interpreted as an integer' )
    if isinstance(num, str ):
        raise TypeError('\'str\' object cannot be interpreted as an integer' )
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary = []
    while num > 0:
        binary.insert(0, num % 2 )
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e ) for e in binary )
    return "0b" + "".join(str(e ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
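# Quick sanity checks for the converter above (positive, negative, and zero):
assert SCREAMING_SNAKE_CASE__(10) == "0b1010"
assert SCREAMING_SNAKE_CASE__(-5) == "-0b101"
assert SCREAMING_SNAKE_CASE__(0) == "0b0"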
| 362
|
from manim import *
class __A ( lowerCAmelCase ):
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Dict = Rectangle(height=0.5 , width=0.5 )
lowerCAmelCase : Any = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCAmelCase : List[str] = Rectangle(height=0.25 , width=0.25 )
lowerCAmelCase : List[Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase : Tuple = [mem.copy() for i in range(6 )]
lowerCAmelCase : int = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : Dict = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : int = VGroup(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : str = Text('CPU' , font_size=24 )
lowerCAmelCase : Union[str, Any] = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCAmelCase_ )
lowerCAmelCase : int = [mem.copy() for i in range(4 )]
lowerCAmelCase : Union[str, Any] = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : int = Text('GPU' , font_size=24 )
lowerCAmelCase : Tuple = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
gpu.move_to([-1, -1, 0] )
self.add(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase : Tuple = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : List[str] = Text('Model' , font_size=24 )
lowerCAmelCase : Union[str, Any] = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
model.move_to([3, -1.0, 0] )
self.add(UpperCAmelCase_ )
lowerCAmelCase : Any = []
lowerCAmelCase : Dict = []
for i, rect in enumerate(UpperCAmelCase_ ):
lowerCAmelCase : Optional[Any] = fill.copy().set_fill(UpperCAmelCase_ , opacity=0.8 )
target.move_to(UpperCAmelCase_ )
model_arr.append(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(UpperCAmelCase_ )
self.add(*UpperCAmelCase_ , *UpperCAmelCase_ )
lowerCAmelCase : Dict = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase : Union[str, Any] = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase : Tuple = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : int = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : Tuple = VGroup(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : Union[str, Any] = Text('Disk' , font_size=24 )
lowerCAmelCase : Optional[Any] = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
disk.move_to([-4, -1.25, 0] )
self.add(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : List[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase : Optional[int] = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : Dict = MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(UpperCAmelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(UpperCAmelCase_ )
lowerCAmelCase : str = MarkupText(
f"Now watch as an input is passed through the model\nand how the memory is utilized and handled." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase_ ) )
lowerCAmelCase : Optional[Any] = Square(0.3 )
input.set_fill(UpperCAmelCase_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , UpperCAmelCase_ , buff=0.5 )
self.play(Write(UpperCAmelCase_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=UpperCAmelCase_ , buff=0.02 )
self.play(MoveToTarget(UpperCAmelCase_ ) )
self.play(FadeOut(UpperCAmelCase_ ) )
lowerCAmelCase : List[Any] = Arrow(start=UpperCAmelCase_ , end=UpperCAmelCase_ , color=UpperCAmelCase_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , UpperCAmelCase_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
lowerCAmelCase : int = MarkupText(
f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase_ , run_time=3 ) )
lowerCAmelCase : Optional[Any] = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(UpperCAmelCase_ ) , Circumscribe(model_arr[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(model_cpu_arr[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
lowerCAmelCase : Any = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , UpperCAmelCase_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
lowerCAmelCase : int = AnimationGroup(
FadeOut(UpperCAmelCase_ , run_time=0.5 ) , MoveToTarget(UpperCAmelCase_ , run_time=0.5 ) , FadeIn(UpperCAmelCase_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(UpperCAmelCase_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
lowerCAmelCase : List[str] = 0.7
self.play(
Circumscribe(model_arr[i] , **UpperCAmelCase_ ) , Circumscribe(cpu_left_col_base[i] , **UpperCAmelCase_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(model_arr[i + 1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(cpu_left_col_base[-1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
lowerCAmelCase : int = a_c
lowerCAmelCase : Union[str, Any] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(UpperCAmelCase_ ) , FadeOut(UpperCAmelCase_ , run_time=0.5 ) , )
lowerCAmelCase : int = MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase_ , run_time=3 ) , MoveToTarget(UpperCAmelCase_ ) )
self.wait()
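# A standalone sketch (hypothetical helper, not accelerate's real hook
# machinery) of the pattern the animation above depicts: each layer's weights
# move to the GPU just before its forward pass and back right after, so only
# one layer occupies GPU memory at a time.
import torch

def offloaded_forward(layers, x, device='cuda'):
    for layer in layers:
        layer.to(device)   # weights CPU -> GPU on demand (the "hook")
        x = layer(x)
        layer.to('cpu')    # evict to keep GPU memory flat
    return x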
| 323
| 0
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
__A : int = pd.read_csv('''sample_data.csv''', header=None)
__A : Dict = df.shape[:1][0]
# If you're using some other dataset input the target column
__A : Dict = df.iloc[:, 1:2]
__A : Optional[Any] = actual_data.values.reshape(len_data, 1)
__A : List[str] = MinMaxScaler().fit_transform(actual_data)
__A : List[Any] = 10
__A : Tuple = 5
__A : Dict = 20
__A : Tuple = len_data - periods * look_back
__A : Optional[int] = actual_data[:division]
__A : Optional[int] = actual_data[division - look_back :]
__A , __A : Dict = [], []
__A , __A : Optional[Any] = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
__A : str = np.array(train_x)
__A : List[Any] = np.array(test_x)
__A : Dict = np.array([list(i.ravel()) for i in train_y])
__A : Optional[int] = np.array([list(i.ravel()) for i in test_y])
__A : Tuple = Sequential()
model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(128, 1)))
model.add(Dense(forward_days))
model.compile(loss='''mean_squared_error''', optimizer='''adam''')
__A : Union[str, Any] = model.fit(
x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
)
__A : Any = model.predict(x_test)
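# A small check of the sliding-window split used above (values assumed): with
# look_back=10 and forward_days=5, each sample pairs 10 past points with the
# next 5.
import numpy as np

series = np.arange(100).reshape(-1, 1)
look_back, forward_days = 10, 5
windows = [
    (series[i : i + look_back], series[i + look_back : i + look_back + forward_days])
    for i in range(0, len(series) - forward_days - look_back + 1)
]
x0, y0 = windows[0]
assert x0.ravel().tolist() == list(range(10))
assert y0.ravel().tolist() == [10, 11, 12, 13, 14]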
| 363
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : Union[str, Any] = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
__A : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
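# A simplified standalone sketch (not transformers' real _LazyModule) of the
# lazy-import pattern above: attribute access triggers the submodule import
# only on first use, e.g. LazySketchModule('m', {'json': ['dumps']}).dumps.
import importlib
import types

class LazySketchModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(attr)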
| 323
| 0
|
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__A : Tuple = '''▁'''
__A : Any = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class __A ( snake_case__ , unittest.TestCase ):
lowerCAmelCase_ : List[str] = BigBirdTokenizer
lowerCAmelCase_ : int = BigBirdTokenizerFast
lowerCAmelCase_ : Optional[int] = True
lowerCAmelCase_ : Any = True
def lowercase__ ( self : List[str] ):
super().setUp()
lowerCAmelCase : Optional[Any] = self.tokenizer_class(_A , keep_accents=_A )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : Dict ):
lowerCAmelCase : Union[str, Any] = '<s>'
lowerCAmelCase : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , '[MASK]' )
self.assertEqual(len(_A ) , 1004 )
def lowercase__ ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def lowercase__ ( self : str ):
if not self.test_rust_tokenizer:
return
lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
lowerCAmelCase : List[Any] = self.get_rust_tokenizer()
lowerCAmelCase : Union[str, Any] = 'I was born in 92000, and this is falsé.'
lowerCAmelCase : Any = tokenizer.tokenize(_A )
lowerCAmelCase : Tuple = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
lowerCAmelCase : Any = tokenizer.encode(_A , add_special_tokens=_A )
lowerCAmelCase : str = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
lowerCAmelCase : List[str] = self.get_rust_tokenizer()
lowerCAmelCase : Tuple = tokenizer.encode(_A )
lowerCAmelCase : int = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
def lowercase__ ( self : Dict ):
lowerCAmelCase : Any = BigBirdTokenizer(_A , keep_accents=_A )
lowerCAmelCase : Tuple = tokenizer.tokenize('This is a test' )
self.assertListEqual(_A , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) , [285, 46, 10, 170, 382] , )
lowerCAmelCase : Dict = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowerCAmelCase : int = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(
_A , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
lowerCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def lowercase__ ( self : Union[str, Any] ):
return BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base' )
@slow
def lowercase__ ( self : str ):
lowerCAmelCase : List[Any] = 'Hello World!'
lowerCAmelCase : str = [65, 18536, 2260, 101, 66]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@slow
def lowercase__ ( self : int ):
lowerCAmelCase : int = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
# fmt: off
lowerCAmelCase : Union[str, Any] = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@require_torch
@slow
def lowercase__ ( self : Tuple ):
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
lowerCAmelCase : Any = list(self.big_tokenizer.get_vocab().keys() )[:10]
lowerCAmelCase : List[str] = ' '.join(_A )
lowerCAmelCase : Optional[Any] = self.big_tokenizer.encode_plus(_A , return_tensors='pt' , return_token_type_ids=_A )
lowerCAmelCase : str = self.big_tokenizer.batch_encode_plus(
[sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=_A )
lowerCAmelCase : int = BigBirdConfig(attention_type='original_full' )
lowerCAmelCase : List[str] = BigBirdModel(_A )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_A )
model(**_A )
@slow
def lowercase__ ( self : Dict ):
lowerCAmelCase : int = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base' )
lowerCAmelCase : List[Any] = tokenizer.decode(tokenizer('Paris is the [MASK].' ).input_ids )
self.assertTrue(decoded_text == '[CLS] Paris is the[MASK].[SEP]' )
@slow
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Union[str, Any] = {'input_ids': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='google/bigbird-roberta-base' , revision='215c99f1600e06f83acce68422f2035b2b5c3510' , )
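# A minimal usage sketch mirroring the slow tests above (downloads the
# checkpoint, so network access and sentencepiece are assumed):
from transformers import BigBirdTokenizer

tok = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
print(tok('Hello World!').input_ids)  # [65, 18536, 2260, 101, 66] per the test above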
| 364
|
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
__A : Dict = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='''relu''')
)
# Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation='''relu'''))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
__A : List[Any] = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
__A : List[str] = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
__A : Any = train_datagen.flow_from_directory(
'''dataset/training_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
__A : Tuple = test_datagen.flow_from_directory(
'''dataset/test_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
__A : Any = tf.keras.preprocessing.image.load_img(
'''dataset/single_prediction/image.png''', target_size=(64, 64)
)
__A : List[str] = tf.keras.preprocessing.image.img_to_array(test_image)
__A : Optional[Any] = np.expand_dims(test_image, axis=0)
__A : int = classifier.predict(test_image)
# training_set.class_indices
    # the sigmoid output is a probability, so threshold it rather than compare to 0/1
    if result[0][0] < 0.5:
        __A : Optional[int] = '''Normal'''
    else:
        __A : str = '''Abnormality detected'''
| 323
| 0
|
import d4rl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
__A : List[str] = {
'''n_samples''': 64,
'''horizon''': 32,
'''num_inference_steps''': 20,
'''n_guide_steps''': 2, # can set to 0 for faster sampling, does not use value network
'''scale_grad_by_std''': True,
'''scale''': 0.1,
'''eta''': 0.0,
'''t_grad_cutoff''': 2,
'''device''': '''cpu''',
}
if __name__ == "__main__":
__A : Optional[int] = '''hopper-medium-v2'''
__A : Optional[int] = gym.make(env_name)
__A : List[str] = ValueGuidedRLPipeline.from_pretrained(
'''bglick13/hopper-medium-v2-value-function-hor32''',
env=env,
)
env.seed(0)
__A : List[Any] = env.reset()
__A : Optional[Any] = 0
__A : List[Any] = 0
__A : List[Any] = 1000
__A : Any = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
__A : Any = pipeline(obs, planning_horizon=32)
# execute action in environment
__A , __A , __A , __A : Tuple = env.step(denorm_actions)
__A : int = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F'Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'
F' {total_score}'
)
# save observations for rendering
rollout.append(next_observation.copy())
__A : str = next_observation
except KeyboardInterrupt:
pass
print(F'Total reward: {total_reward}')
| 365
|
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__A : str = logging.getLogger(__name__)
class __A ( lowerCAmelCase ):
def lowercase__ ( self : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Any=None ):
lowerCAmelCase : List[Any] = self.layer[current_layer](UpperCAmelCase_ , UpperCAmelCase_ , head_mask[current_layer] )
lowerCAmelCase : Optional[Any] = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , lowerCAmelCase , )
class __A ( lowerCAmelCase ):
def __init__( self : Dict , UpperCAmelCase_ : Optional[int] ):
super().__init__(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = BertEncoderWithPabee(UpperCAmelCase_ )
self.init_weights()
lowerCAmelCase : str = 0
lowerCAmelCase : Optional[Any] = 0
lowerCAmelCase : str = 0
lowerCAmelCase : Dict = 0
def lowercase__ ( self : int , UpperCAmelCase_ : Any ):
lowerCAmelCase : int = threshold
def lowercase__ ( self : Tuple , UpperCAmelCase_ : Dict ):
lowerCAmelCase : Optional[Any] = patience
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Tuple = 0
lowerCAmelCase : Tuple = 0
def lowercase__ ( self : Dict ):
lowerCAmelCase : Optional[int] = self.inference_layers_num / self.inference_instances_num
lowerCAmelCase : List[Any] = (
f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
)
print(UpperCAmelCase_ )
@add_start_docstrings_to_model_forward(UpperCAmelCase_ )
def lowercase__ ( self : Tuple , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Tuple=False , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
lowerCAmelCase : Optional[int] = input_ids.size()
elif inputs_embeds is not None:
lowerCAmelCase : List[str] = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
lowerCAmelCase : Union[str, Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
lowerCAmelCase : Any = torch.ones(UpperCAmelCase_ , device=UpperCAmelCase_ )
if token_type_ids is None:
lowerCAmelCase : Union[str, Any] = torch.zeros(UpperCAmelCase_ , dtype=torch.long , device=UpperCAmelCase_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
lowerCAmelCase : torch.Tensor = self.get_extended_attention_mask(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[int] = encoder_hidden_states.size()
lowerCAmelCase : Optional[int] = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
lowerCAmelCase : Any = torch.ones(UpperCAmelCase_ , device=UpperCAmelCase_ )
lowerCAmelCase : Tuple = self.invert_attention_mask(UpperCAmelCase_ )
else:
lowerCAmelCase : List[Any] = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
lowerCAmelCase : Optional[Any] = self.get_head_mask(UpperCAmelCase_ , self.config.num_hidden_layers )
lowerCAmelCase : int = self.embeddings(
input_ids=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , inputs_embeds=UpperCAmelCase_ )
lowerCAmelCase : List[str] = embedding_output
if self.training:
lowerCAmelCase : Tuple = []
for i in range(self.config.num_hidden_layers ):
lowerCAmelCase : Dict = self.encoder.adaptive_forward(
UpperCAmelCase_ , current_layer=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ )
lowerCAmelCase : List[str] = self.pooler(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = output_layers[i](output_dropout(UpperCAmelCase_ ) )
res.append(UpperCAmelCase_ )
elif self.patience == 0: # Use all layers for inference
lowerCAmelCase : Union[str, Any] = self.encoder(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , encoder_attention_mask=UpperCAmelCase_ , )
lowerCAmelCase : Optional[Any] = self.pooler(encoder_outputs[0] )
lowerCAmelCase : List[Any] = [output_layers[self.config.num_hidden_layers - 1](UpperCAmelCase_ )]
else:
lowerCAmelCase : Tuple = 0
lowerCAmelCase : List[str] = None
lowerCAmelCase : Optional[Any] = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
lowerCAmelCase : Union[str, Any] = self.encoder.adaptive_forward(
UpperCAmelCase_ , current_layer=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = self.pooler(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = output_layers[i](UpperCAmelCase_ )
if regression:
lowerCAmelCase : List[str] = logits.detach()
if patient_result is not None:
lowerCAmelCase : List[Any] = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
lowerCAmelCase : Any = 0
else:
lowerCAmelCase : Union[str, Any] = logits.detach().argmax(dim=1 )
if patient_result is not None:
lowerCAmelCase : Optional[Any] = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(UpperCAmelCase_ ) ):
patient_counter += 1
else:
lowerCAmelCase : Tuple = 0
lowerCAmelCase : List[Any] = logits
if patient_counter == self.patience:
break
lowerCAmelCase : Dict = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , lowerCAmelCase , )
class __A ( lowerCAmelCase ):
def __init__( self : Tuple , UpperCAmelCase_ : Tuple ):
super().__init__(UpperCAmelCase_ )
lowerCAmelCase : Tuple = config.num_labels
lowerCAmelCase : int = BertModelWithPabee(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = nn.Dropout(config.hidden_dropout_prob )
lowerCAmelCase : List[Any] = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Any=None , ):
lowerCAmelCase : int = self.bert(
input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , head_mask=UpperCAmelCase_ , inputs_embeds=UpperCAmelCase_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
lowerCAmelCase : Any = (logits[-1],)
if labels is not None:
lowerCAmelCase : Tuple = None
lowerCAmelCase : Optional[int] = 0
for ix, logits_item in enumerate(UpperCAmelCase_ ):
if self.num_labels == 1:
# We are doing regression
lowerCAmelCase : Tuple = MSELoss()
lowerCAmelCase : Any = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
lowerCAmelCase : Tuple = CrossEntropyLoss()
lowerCAmelCase : Dict = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
lowerCAmelCase : Any = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
lowerCAmelCase : str = (total_loss / total_weights,) + outputs
return outputs
| 323
| 0
|
"""simple docstring"""
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> np.ndarray:
'''simple docstring'''
    lowerCAmelCase : Optional[int] = cv2.getAffineTransform(lowerCAmelCase__, lowerCAmelCase__ )
    return cv2.warpAffine(lowerCAmelCase__, lowerCAmelCase__, (rows, cols) )
if __name__ == "__main__":
# read original image
    __A : Tuple = cv2.imread(
str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''')
)
# turn image in gray scale value
    __A : List[str] = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# get image shape
__A : int = gray_img.shape
# set different points to rotate image
    __A : Union[str, Any] = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    __A : Dict = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    __A : int = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    __A : Optional[Any] = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
# add all rotated images in a list
__A : Optional[Any] = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
__A : Dict = plt.figure(1)
__A : Tuple = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''')
plt.title(titles[i])
plt.axis('''off''')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 366
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__A : List[Any] = logging.get_logger(__name__)
__A : List[Any] = {
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : Union[str, Any] = "deberta-v2"
def __init__( self : int , UpperCAmelCase_ : Dict=128100 , UpperCAmelCase_ : Optional[int]=1536 , UpperCAmelCase_ : Tuple=24 , UpperCAmelCase_ : Any=24 , UpperCAmelCase_ : Any=6144 , UpperCAmelCase_ : Tuple="gelu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : List[Any]=512 , UpperCAmelCase_ : List[str]=0 , UpperCAmelCase_ : List[Any]=0.02 , UpperCAmelCase_ : Optional[int]=1E-7 , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Dict=-1 , UpperCAmelCase_ : Tuple=0 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : int=None , UpperCAmelCase_ : List[Any]=0 , UpperCAmelCase_ : int="gelu" , **UpperCAmelCase_ : int , ):
super().__init__(**UpperCAmelCase_ )
lowerCAmelCase : Dict = hidden_size
lowerCAmelCase : List[Any] = num_hidden_layers
lowerCAmelCase : str = num_attention_heads
lowerCAmelCase : List[str] = intermediate_size
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : int = hidden_dropout_prob
lowerCAmelCase : int = attention_probs_dropout_prob
lowerCAmelCase : Any = max_position_embeddings
lowerCAmelCase : str = type_vocab_size
lowerCAmelCase : Union[str, Any] = initializer_range
lowerCAmelCase : Union[str, Any] = relative_attention
lowerCAmelCase : List[Any] = max_relative_positions
lowerCAmelCase : List[Any] = pad_token_id
lowerCAmelCase : Optional[Any] = position_biased_input
# Backwards compatibility
if type(UpperCAmelCase_ ) == str:
lowerCAmelCase : Tuple = [x.strip() for x in pos_att_type.lower().split('|' )]
lowerCAmelCase : str = pos_att_type
lowerCAmelCase : Dict = vocab_size
lowerCAmelCase : Optional[Any] = layer_norm_eps
lowerCAmelCase : str = kwargs.get('pooler_hidden_size' , UpperCAmelCase_ )
lowerCAmelCase : Tuple = pooler_dropout
lowerCAmelCase : Union[str, Any] = pooler_hidden_act
class __A ( lowerCAmelCase ):
@property
def lowercase__ ( self : Optional[Any] ):
if self.task == "multiple-choice":
lowerCAmelCase : Tuple = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase : Union[str, Any] = {0: 'batch', 1: 'sequence'}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] )
else:
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] )
@property
def lowercase__ ( self : int ):
return 12
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional["TensorType"] = None , UpperCAmelCase_ : int = 3 , UpperCAmelCase_ : int = 40 , UpperCAmelCase_ : int = 40 , UpperCAmelCase_ : "PreTrainedTokenizerBase" = None , ):
lowerCAmelCase : List[str] = super().generate_dummy_inputs(preprocessor=UpperCAmelCase_ , framework=UpperCAmelCase_ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 323
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class __A ( unittest.TestCase ):
def __init__( self : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any]=13 , UpperCAmelCase_ : Tuple=7 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Union[str, Any]=99 , UpperCAmelCase_ : Optional[int]=32 , UpperCAmelCase_ : Dict=5 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : Tuple=37 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Tuple=512 , UpperCAmelCase_ : Union[str, Any]=16 , UpperCAmelCase_ : Any=2 , UpperCAmelCase_ : List[Any]=0.02 , UpperCAmelCase_ : Optional[int]=4 , ):
lowerCAmelCase : str = parent
lowerCAmelCase : List[Any] = batch_size
lowerCAmelCase : Tuple = seq_length
lowerCAmelCase : Dict = is_training
lowerCAmelCase : str = use_attention_mask
lowerCAmelCase : Dict = use_token_type_ids
lowerCAmelCase : str = use_labels
lowerCAmelCase : str = vocab_size
lowerCAmelCase : Union[str, Any] = hidden_size
lowerCAmelCase : int = num_hidden_layers
lowerCAmelCase : List[Any] = num_attention_heads
lowerCAmelCase : List[str] = intermediate_size
lowerCAmelCase : int = hidden_act
lowerCAmelCase : str = hidden_dropout_prob
lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
lowerCAmelCase : List[str] = max_position_embeddings
lowerCAmelCase : Any = type_vocab_size
lowerCAmelCase : Dict = type_sequence_label_size
lowerCAmelCase : Optional[int] = initializer_range
lowerCAmelCase : Union[str, Any] = num_choices
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Tuple = None
if self.use_attention_mask:
lowerCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : int = None
if self.use_token_type_ids:
lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase : Any = AlbertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowercase__ ( self : int ):
lowerCAmelCase : Any = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
lowerCAmelCase : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class __A ( FlaxModelTesterMixin , unittest.TestCase ):
lowerCAmelCase_ : Dict = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
def lowercase__ ( self : Optional[Any] ):
        self.model_tester = FlaxAlbertModelTester(self )
@slow
def lowercase__ ( self : List[Any] ):
for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('albert-base-v2' )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class __A ( unittest.TestCase ):
@slow
def lowercase__ ( self : Optional[Any] ):
        model = FlaxAlbertModel.from_pretrained('albert-base-v2' )
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = np.array(
            [[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
| 367
|
fast27_timesteps = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
smart27_timesteps = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
smart50_timesteps = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
smart100_timesteps = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
smart185_timesteps = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
super27_timesteps = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
super40_timesteps = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
super100_timesteps = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
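# Hedged sanity-check sketch (not part of the original file): each schedule
# above is a strictly decreasing list of diffusion timesteps that starts at
# 999 and ends at 0. A structural check, written under that assumption:
def _is_valid_schedule(timesteps):
    # strictly decreasing, starting at 999 and ending at 0
    return (
        timesteps[0] == 999
        and timesteps[-1] == 0
        and all(a > b for a, b in zip(timesteps, timesteps[1:]))
    )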
| 323
| 0
|
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec('''s3fs''') is not None
if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path ) -> str:
    '''simple docstring'''
    if "://" in dataset_path:
        dataset_path = dataset_path.split('://' )[1]
    return dataset_path
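# Hedged usage sketch for the helper above (the example paths are
# illustrative, not taken from the original source):
#
#   extract_path_from_uri("s3://my-bucket/train.csv")  # -> "my-bucket/train.csv"
#   extract_path_from_uri("relative/local/path")       # -> "relative/local/path"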
def is_remote_filesystem(fs ) -> bool:
    '''simple docstring'''
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def rename(fs, src, dst ):
    '''simple docstring'''
    is_local = not is_remote_filesystem(fs )
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src ), fs._strip_protocol(dst ) )
    else:
        fs.mv(src, dst, recursive=True )
def _reset_fsspec_lock() -> None:
    '''simple docstring'''
    if hasattr(fsspec.asyn, 'reset_lock' ):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
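# Minimal usage sketch (not part of the original module), exercising the
# helpers above with fsspec's built-in in-memory filesystem; nothing beyond
# fsspec itself is assumed as a dependency.
if __name__ == "__main__":
    mem_fs = fsspec.filesystem("memory")
    with mem_fs.open("/src.txt", "w") as f:
        f.write("hello")
    # "memory" is not the "file" protocol, so it counts as remote here
    assert is_remote_filesystem(mem_fs)
    assert extract_path_from_uri("memory://src.txt") == "src.txt"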
| 368
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__A : Optional[Any] = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
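# Hedged usage note (not part of the original file): with the _LazyModule
# pattern above, submodules are only imported on first attribute access, e.g.
#
#   from transformers.models.unispeech import UniSpeechConfig  # cheap
#   from transformers.models.unispeech import UniSpeechModel   # pulls in torch lazily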
| 323
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
__A : Any = {
'''configuration_speecht5''': [
'''SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP''',
'''SpeechT5Config''',
'''SpeechT5HifiGanConfig''',
],
'''feature_extraction_speecht5''': ['''SpeechT5FeatureExtractor'''],
'''processing_speecht5''': ['''SpeechT5Processor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = ['''SpeechT5Tokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[int] = [
'''SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SpeechT5ForSpeechToText''',
'''SpeechT5ForSpeechToSpeech''',
'''SpeechT5ForTextToSpeech''',
'''SpeechT5Model''',
'''SpeechT5PreTrainedModel''',
'''SpeechT5HifiGan''',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 369
|
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester :
def __init__( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any]=13 , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : str=99 , UpperCAmelCase_ : List[str]=32 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Optional[Any]=37 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : Tuple=512 , UpperCAmelCase_ : Any=16 , UpperCAmelCase_ : Any=2 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : Optional[int]=3 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Optional[int]=None , ):
lowerCAmelCase : int = parent
lowerCAmelCase : Any = 13
lowerCAmelCase : Union[str, Any] = 7
lowerCAmelCase : List[Any] = True
lowerCAmelCase : List[str] = True
lowerCAmelCase : Tuple = True
lowerCAmelCase : Union[str, Any] = True
lowerCAmelCase : Tuple = 99
lowerCAmelCase : Optional[Any] = 32
lowerCAmelCase : List[str] = 2
lowerCAmelCase : str = 4
lowerCAmelCase : Optional[Any] = 37
lowerCAmelCase : List[Any] = 'gelu'
lowerCAmelCase : Any = 0.1
lowerCAmelCase : Any = 0.1
lowerCAmelCase : Optional[Any] = 512
lowerCAmelCase : Dict = 16
lowerCAmelCase : Optional[Any] = 2
lowerCAmelCase : Union[str, Any] = 0.02
lowerCAmelCase : Optional[int] = 3
lowerCAmelCase : List[str] = 4
lowerCAmelCase : Any = None
def lowercase__ ( self : List[str] ):
lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Any = None
if self.use_input_mask:
lowerCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : Dict = None
if self.use_token_type_ids:
lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase : List[str] = None
lowerCAmelCase : Any = None
lowerCAmelCase : Tuple = None
if self.use_labels:
lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase : int = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase : Tuple = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any ):
lowerCAmelCase : List[Any] = TFRoFormerModel(config=UpperCAmelCase_ )
lowerCAmelCase : str = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCAmelCase : str = [input_ids, input_mask]
lowerCAmelCase : Any = model(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict ):
lowerCAmelCase : str = True
lowerCAmelCase : List[str] = TFRoFormerForCausalLM(config=UpperCAmelCase_ )
lowerCAmelCase : List[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : List[str] = model(UpperCAmelCase_ )['logits']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def lowercase__ ( self : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any ):
lowerCAmelCase : Union[str, Any] = TFRoFormerForMaskedLM(config=UpperCAmelCase_ )
lowerCAmelCase : Tuple = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : Tuple = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] ):
lowerCAmelCase : str = self.num_labels
lowerCAmelCase : Optional[Any] = TFRoFormerForSequenceClassification(config=UpperCAmelCase_ )
lowerCAmelCase : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : Optional[int] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] ):
lowerCAmelCase : Dict = self.num_choices
lowerCAmelCase : str = TFRoFormerForMultipleChoice(config=UpperCAmelCase_ )
lowerCAmelCase : Tuple = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : Tuple = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : int = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : Union[str, Any] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple ):
lowerCAmelCase : List[Any] = self.num_labels
lowerCAmelCase : Any = TFRoFormerForTokenClassification(config=UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : Dict = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int ):
lowerCAmelCase : Optional[int] = TFRoFormerForQuestionAnswering(config=UpperCAmelCase_ )
lowerCAmelCase : Dict = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : int = model(UpperCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
lowerCAmelCase : Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __A ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowerCAmelCase_ : List[str] = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ : Optional[Any] = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase_ : Optional[int] = False
lowerCAmelCase_ : int = False
def lowercase__ ( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] ):
        if pipeline_test_case_name == "TextGenerationPipelineTests":
return True
return False
def lowercase__ ( self : int ):
        self.model_tester = TFRoFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RoFormerConfig , hidden_size=37 )
def lowercase__ ( self : int ):
self.config_tester.run_common_tests()
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase_ )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ )
def lowercase__ ( self : str ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase_ )
def lowercase__ ( self : int ):
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
@slow
def lowercase__ ( self : Dict ):
        model = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
        self.assertIsNotNone(model )
@require_tf
class __A ( unittest.TestCase ):
@slow
def lowercase__ ( self : Any ):
        model = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        # TODO Replace vocab size
        vocab_size = 50000
        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape , expected_shape )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
[
[
[-0.12_05_33_41, -1.0_26_49_01, 0.29_22_19_46],
[-1.5_13_37_83, 0.19_74_33, 0.15_19_06_07],
[-5.0_13_54_03, -3.90_02_56, -0.84_03_87_64],
]
] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
@require_tf
class __A ( unittest.TestCase ):
lowerCAmelCase_ : Optional[int] = 1E-4
def lowercase__ ( self : Any ):
        input_ids = tf.constant([[4, 10]] )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
        emb = emb1(input_ids.shape )
        desired_weights = tf.constant(
            [[0.00_00, 0.00_00, 0.00_00, 1.00_00, 1.00_00, 1.00_00], [0.84_15, 0.04_64, 0.00_22, 0.54_03, 0.99_89, 1.00_00]] )
        tf.debugging.assert_near(emb , desired_weights , atol=self.tolerance )
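    # Hedged reference sketch (not in the original test): the expected row for
    # position 1 above matches the classic sinusoidal formula, sin for the
    # first half of the channels and cos for the second half:
    #
    #   import numpy as np
    #   freqs = 1 / np.power(10_000, 2 * np.arange(3) / 6)  # position 1, dim 6
    #   np.concatenate([np.sin(freqs), np.cos(freqs)])
    #   # -> [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]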
def lowercase__ ( self : int ):
        desired_weights = tf.constant(
            [
                [0.00_00, 0.00_00, 0.00_00, 0.00_00, 0.00_00],
                [0.84_15, 0.82_19, 0.80_20, 0.78_19, 0.76_17],
                [0.90_93, 0.93_64, 0.95_81, 0.97_49, 0.98_70],
            ] )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
        emb1([2, 16, 512] )
        weights = emb1.weight[:3, :5]
        tf.debugging.assert_near(weights , desired_weights , atol=self.tolerance )
@require_tf
class __A ( unittest.TestCase ):
lowerCAmelCase_ : Optional[int] = 1E-4
def lowercase__ ( self : List[Any] ):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
        sinusoidal_pos = embed_positions([2, 16, 768] )[None, None, :, :]
        query_layer , key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos , query_layer , key_layer )
        expected_query_layer = tf.constant(
[
[0.00_00, 0.01_00, 0.02_00, 0.03_00, 0.04_00, 0.05_00, 0.06_00, 0.07_00],
[-0.20_12, 0.88_97, 0.02_63, 0.94_01, 0.20_74, 0.94_63, 0.34_81, 0.93_43],
[-1.70_57, 0.62_71, -1.21_45, 1.38_97, -0.63_03, 1.76_47, -0.11_73, 1.89_85],
[-2.17_31, -1.63_97, -2.73_58, 0.28_54, -2.18_40, 1.71_83, -1.30_18, 2.48_71],
[0.27_17, -3.61_73, -2.92_06, -2.19_88, -3.66_38, 0.38_58, -2.91_55, 2.29_80],
[3.98_59, -2.15_80, -0.79_84, -4.49_04, -4.11_81, -2.02_52, -4.47_82, 1.12_53],
] )
        expected_key_layer = tf.constant(
[
[0.00_00, -0.01_00, -0.02_00, -0.03_00, -0.04_00, -0.05_00, -0.06_00, -0.07_00],
[0.20_12, -0.88_97, -0.02_63, -0.94_01, -0.20_74, -0.94_63, -0.34_81, -0.93_43],
[1.70_57, -0.62_71, 1.21_45, -1.38_97, 0.63_03, -1.76_47, 0.11_73, -1.89_85],
[2.17_31, 1.63_97, 2.73_58, -0.28_54, 2.18_40, -1.71_83, 1.30_18, -2.48_71],
[-0.27_17, 3.61_73, 2.92_06, 2.19_88, 3.66_38, -0.38_58, 2.91_55, -2.29_80],
[-3.98_59, 2.15_80, 0.79_84, 4.49_04, 4.11_81, 2.02_52, 4.47_82, -1.12_53],
] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8] , expected_query_layer , atol=self.tolerance )
        tf.debugging.assert_near(key_layer[0, 0, :6, :8] , expected_key_layer , atol=self.tolerance )
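        # Hedged background note (not in the original test): rotary embeddings
        # rotate each (even, odd) channel pair by a position-dependent angle,
        # roughly q_rot = q * cos(theta) + rotate_half(q) * sin(theta), which
        # is the identity apply_rotary_position_embeddings is checked against.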
| 323
| 0
|
import qiskit
def half_adder(bit0: int, bit1: int ) -> qiskit.result.counts.Counts:
    '''simple docstring'''
    simulator = qiskit.Aer.get_backend('aer_simulator' )
    qc_ha = qiskit.QuantumCircuit(4, 2 )
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0 )
    if bit1 == 1:
        qc_ha.x(1 )
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2 )
    qc_ha.cx(1, 2 )
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3 )
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0 )  # extract XOR value
    qc_ha.measure(3, 1 )  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1_000 )
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha )
if __name__ == "__main__":
    counts = half_adder(1, 1)
print(F'Half Adder Output Qubit Counts: {counts}')
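    # Hedged illustration (not in the original script): the half adder's full
    # classical truth table for comparison; the two measured classical bits
    # read as (AND, XOR), so inputs (1, 1) should concentrate on the '10'
    # outcome.
    for bit0, bit1 in [(0, 0), (0, 1), (1, 0), (1, 1)]:
        print(bit0, bit1, "->", half_adder(bit0, bit1))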
| 370
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __A ( unittest.TestCase ):
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : Tuple = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase : List[Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
lowerCAmelCase : str = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
lowerCAmelCase : Optional[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
lowerCAmelCase : Tuple = {'unk_token': '<unk>'}
lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(UpperCAmelCase_ ) )
lowerCAmelCase : Dict = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , UpperCAmelCase_ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : Any , **UpperCAmelCase_ : Dict ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowercase__ ( self : Tuple , **UpperCAmelCase_ : str ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowercase__ ( self : Optional[int] , **UpperCAmelCase_ : Optional[int] ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self : List[str] ):
        lowerCAmelCase : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
lowerCAmelCase : List[Any] = [Image.fromarray(np.moveaxis(UpperCAmelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase__ ( self : Any ):
lowerCAmelCase : List[str] = self.get_tokenizer()
lowerCAmelCase : List[str] = self.get_rust_tokenizer()
lowerCAmelCase : Optional[int] = self.get_image_processor()
lowerCAmelCase : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
lowerCAmelCase : int = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
lowerCAmelCase : Dict = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase_ )
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase_ )
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase_ )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Any = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase : Tuple = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowerCAmelCase : Union[str, Any] = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0 )
lowerCAmelCase : Dict = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase_ )
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Any = self.get_image_processor()
lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
lowerCAmelCase : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : Dict = self.prepare_image_inputs()
lowerCAmelCase : List[str] = image_processor(UpperCAmelCase_ , return_tensors='np' )
lowerCAmelCase : int = processor(images=UpperCAmelCase_ , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Union[str, Any] = self.get_image_processor()
lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
lowerCAmelCase : Dict = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = 'lower newer'
lowerCAmelCase : List[str] = processor(text=UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = tokenizer(UpperCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : Tuple = self.get_image_processor()
lowerCAmelCase : Dict = self.get_tokenizer()
lowerCAmelCase : List[str] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = 'lower newer'
lowerCAmelCase : Optional[int] = self.prepare_image_inputs()
lowerCAmelCase : Union[str, Any] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_ ):
processor()
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Optional[Any] = self.get_image_processor()
lowerCAmelCase : str = self.get_tokenizer()
lowerCAmelCase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase : Any = processor.batch_decode(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = tokenizer.batch_decode(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : List[Any] = self.get_image_processor()
lowerCAmelCase : Dict = self.get_tokenizer()
lowerCAmelCase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : Dict = 'lower newer'
lowerCAmelCase : Tuple = self.prepare_image_inputs()
lowerCAmelCase : List[str] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
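    # Hedged usage sketch (not in the original tests): outside the test
    # harness, the processor pattern exercised above is typically used as
    #
    #   processor = CLIPProcessor(tokenizer=tok, image_processor=img_proc)
    #   inputs = processor(text=["a photo"], images=[pil_image], return_tensors="np")
    #   # -> dict with input_ids, attention_mask and pixel_values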
| 323
| 0
|
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester :
def __init__( self : Optional[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : int = 13 , UpperCAmelCase_ : int = 64 , UpperCAmelCase_ : int = 2 , UpperCAmelCase_ : int = 3 , UpperCAmelCase_ : int = 3 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : int = 128 , UpperCAmelCase_ : str=[16, 32, 64, 128] , UpperCAmelCase_ : int = 7 , UpperCAmelCase_ : int = 4 , UpperCAmelCase_ : int = 37 , UpperCAmelCase_ : str = "gelu" , UpperCAmelCase_ : float = 0.1 , UpperCAmelCase_ : float = 0.1 , UpperCAmelCase_ : int = 10 , UpperCAmelCase_ : float = 0.02 , UpperCAmelCase_ : int = 2 , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : int = 128 , UpperCAmelCase_ : List[int] = [2, 2, 2, 2] , UpperCAmelCase_ : int = 2 , UpperCAmelCase_ : int = 2 , ):
lowerCAmelCase : Optional[Any] = parent
lowerCAmelCase : Tuple = batch_size
lowerCAmelCase : Optional[Any] = image_size
lowerCAmelCase : int = patch_size
lowerCAmelCase : List[Any] = num_channels
lowerCAmelCase : str = is_training
lowerCAmelCase : Optional[int] = use_labels
lowerCAmelCase : Union[str, Any] = hidden_size
lowerCAmelCase : Dict = num_hidden_layers
lowerCAmelCase : Dict = num_attention_heads
lowerCAmelCase : Optional[int] = intermediate_size
lowerCAmelCase : int = hidden_act
lowerCAmelCase : Optional[int] = hidden_dropout_prob
lowerCAmelCase : List[str] = attention_probs_dropout_prob
lowerCAmelCase : Optional[Any] = type_sequence_label_size
lowerCAmelCase : int = initializer_range
lowerCAmelCase : Any = encoder_stride
lowerCAmelCase : int = num_attention_outputs
lowerCAmelCase : Tuple = embed_dim
lowerCAmelCase : str = embed_dim + 1
lowerCAmelCase : Optional[int] = resolution
lowerCAmelCase : Tuple = depths
lowerCAmelCase : List[Any] = hidden_sizes
lowerCAmelCase : List[Any] = dim
lowerCAmelCase : Tuple = mlp_expansion_ratio
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase : Dict = None
if self.use_labels:
lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : Dict ):
return EfficientFormerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def lowercase__ ( self : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] ):
lowerCAmelCase : Dict = TFEfficientFormerModel(config=__snake_case )
lowerCAmelCase : Any = model(__snake_case , training=__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str ):
lowerCAmelCase : List[str] = self.type_sequence_label_size
lowerCAmelCase : List[str] = TFEfficientFormerForImageClassification(__snake_case )
lowerCAmelCase : Tuple = model(__snake_case , labels=__snake_case , training=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase : Any = 1
lowerCAmelCase : Any = TFEfficientFormerForImageClassification(__snake_case )
lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase : Any = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : int = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : int = config_and_inputs
lowerCAmelCase : int = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __A ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowerCAmelCase_ : Union[str, Any] = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ : str = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
lowerCAmelCase_ : Union[str, Any] = False
lowerCAmelCase_ : List[str] = False
lowerCAmelCase_ : Union[str, Any] = False
lowerCAmelCase_ : Any = False
lowerCAmelCase_ : Any = False
def lowercase__ ( self : Any ):
        self.model_tester = TFEfficientFormerModelTester(self )
        self.config_tester = ConfigTester(
            self , config_class=EfficientFormerConfig , has_text_modality=False , hidden_size=37 )
def lowercase__ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='EfficientFormer does not use inputs_embeds' )
def lowercase__ ( self : int ):
pass
@unittest.skip(reason='EfficientFormer does not support input and output embeddings' )
def lowercase__ ( self : Any ):
pass
def lowercase__ ( self : int ):
lowerCAmelCase , lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : List[Any] = model_class(__snake_case )
lowerCAmelCase : int = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : Union[str, Any] = [*signature.parameters.keys()]
lowerCAmelCase : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , __snake_case )
def lowercase__ ( self : List[Any] ):
def check_hidden_states_output(UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str ):
lowerCAmelCase : Any = model_class(__snake_case )
lowerCAmelCase : int = model(**self._prepare_for_class(__snake_case , __snake_case ) , training=__snake_case )
lowerCAmelCase : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase : Union[str, Any] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__snake_case ) , __snake_case )
if hasattr(self.model_tester , 'encoder_seq_length' ):
lowerCAmelCase : Optional[Any] = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , 'chunk_length' ) and self.model_tester.chunk_length > 1:
lowerCAmelCase : List[str] = seq_length * self.model_tester.chunk_length
else:
lowerCAmelCase : Any = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
lowerCAmelCase : Dict = outputs.decoder_hidden_states
                self.assertIsInstance(__snake_case , (list, tuple) )
self.assertEqual(len(__snake_case ) , __snake_case )
lowerCAmelCase : str = getattr(self.model_tester , 'seq_length' , __snake_case )
lowerCAmelCase : Optional[int] = getattr(self.model_tester , 'decoder_seq_length' , __snake_case )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
lowerCAmelCase , lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : List[Any] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase : List[Any] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
def lowercase__ ( self : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict=False ):
lowerCAmelCase : Dict = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
@unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' )
def lowercase__ ( self : Any ):
lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__snake_case )
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case )
@slow
def lowercase__ ( self : List[Any] ):
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : Dict = TFEfficientFormerModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def lowercase__ ( self : str ):
lowerCAmelCase , lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : str = True
lowerCAmelCase : str = getattr(self.model_tester , 'seq_length' , __snake_case )
lowerCAmelCase : List[Any] = getattr(self.model_tester , 'encoder_seq_length' , __snake_case )
lowerCAmelCase : Union[str, Any] = getattr(self.model_tester , 'key_length' , __snake_case )
lowerCAmelCase : Any = getattr(self.model_tester , 'chunk_length' , __snake_case )
if chunk_length is not None and hasattr(self.model_tester , 'num_hashes' ):
lowerCAmelCase : Any = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
lowerCAmelCase : Union[str, Any] = True
lowerCAmelCase : Dict = False
lowerCAmelCase : Optional[Any] = True
lowerCAmelCase : int = model_class(__snake_case )
lowerCAmelCase : Union[str, Any] = model(**self._prepare_for_class(__snake_case , __snake_case ) , training=__snake_case )
lowerCAmelCase : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase : Dict = True
lowerCAmelCase : Any = model_class(__snake_case )
lowerCAmelCase : Tuple = model(**self._prepare_for_class(__snake_case , __snake_case ) , training=__snake_case )
lowerCAmelCase : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def lowercase__ ( self : Dict ):
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
lowerCAmelCase , lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
lowerCAmelCase : Tuple = model_class(__snake_case )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
lowerCAmelCase : Any = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=__snake_case )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
lowerCAmelCase : int = model(__snake_case )
self.assertTrue(outputs_dict is not None )
def prepare_img() -> Union[str, Any]:
    '''simple docstring'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
@require_vision
class __A ( unittest.TestCase ):
@cached_property
def lowercase__ ( self : Any ):
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' )
if is_vision_available()
else None
)
@slow
def lowercase__ ( self : int ):
        model = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='tf' )
        # forward pass
        outputs = model(**inputs , training=False )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.05_55, 0.48_25, -0.08_52] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@slow
def lowercase__ ( self : str ):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            'snap-research/efficientformer-l1-300' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='tf' )
        # forward pass
        outputs = model(**inputs , training=False )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.13_12, 0.43_53, -1.04_99] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 371
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A : List[Any] = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = ['''XLMRobertaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = ['''XLMRobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Dict = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 323
| 0
|
class SubArray :
    def __init__( self : Optional[int] , arr : Union[str, Any] ):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(',' )
    def solve_sub_array( self : List[Any] ):
        rear = [int(self.array[0] )] * len(self.array )
        sum_value = [int(self.array[0] )] * len(self.array )
        for i in range(1 , len(self.array ) ):
            sum_value[i] = max(
                int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
            rear[i] = max(sum_value[i] , rear[i - 1] )
        return rear[len(self.array ) - 1]
if __name__ == "__main__":
    whole_array = input('''please input some numbers:''')
    array = SubArray(whole_array)
    re = array.solve_sub_array()
print(('''the results is:''', re))
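    # Hedged worked example (not in the original): SubArray computes the
    # maximum contiguous subarray sum, so for "1,-2,3,4" the running maxima
    # are [1, -1, 3, 7] and the answer is 7.
    assert SubArray('1,-2,3,4').solve_sub_array() == 7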
| 350
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)
@dataclass
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : Union[List[PIL.Image.Image], np.ndarray]
lowerCAmelCase_ : Optional[List[bool]]
lowerCAmelCase_ : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
    from .pipeline_if_img2img import IFImg2ImgPipeline
    from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 323
| 0
|
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__A : List[str] = logging.get_logger(__name__)
__A : Tuple = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
__A : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
'''simple docstring'''
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
lowerCAmelCase : int = model_type_to_module_name(_UpperCAmelCase )
lowerCAmelCase : Any = importlib.import_module(f".{module_name}", 'transformers.models' )
try:
return getattr(_UpperCAmelCase, _UpperCAmelCase )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(_UpperCAmelCase, '__name__', _UpperCAmelCase ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
lowerCAmelCase : List[Any] = importlib.import_module('transformers' )
if hasattr(_UpperCAmelCase, _UpperCAmelCase ):
return getattr(_UpperCAmelCase, _UpperCAmelCase )
return None
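# The lookup above tries a static name -> module mapping, then dynamically
# registered classes, then falls back to the top-level package (for dummy
# objects). A self-contained toy sketch of the same resolution pattern,
# using a standard-library module so it runs anywhere (names illustrative):
import importlib

TOY_REGISTRY = {"json": ("json", "JSONDecoder")}

def class_from_name(name: str):
    for module_name, class_name in TOY_REGISTRY.values():
        if class_name == name:
            module = importlib.import_module(module_name)
            return getattr(module, class_name, None)
    return None

assert class_from_name("JSONDecoder") is not None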
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = False, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = False, **_UpperCAmelCase, ):
'''simple docstring'''
lowerCAmelCase : Any = get_file_from_repo(
_UpperCAmelCase, _UpperCAmelCase, cache_dir=_UpperCAmelCase, force_download=_UpperCAmelCase, resume_download=_UpperCAmelCase, proxies=_UpperCAmelCase, use_auth_token=_UpperCAmelCase, revision=_UpperCAmelCase, local_files_only=_UpperCAmelCase, )
if resolved_config_file is None:
logger.info(
'Could not locate the feature extractor configuration file, will try to use the model config instead.' )
return {}
with open(_UpperCAmelCase, encoding='utf-8' ) as reader:
return json.load(_UpperCAmelCase )
class __A :
def __init__( self : Tuple ):
raise EnvironmentError(
'AutoFeatureExtractor is designed to be instantiated '
'using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
@replace_list_option_in_docstrings(UpperCAmelCase_ )
def lowercase__ ( cls : Any , UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Optional[Any] ):
lowerCAmelCase : Optional[Any] = kwargs.pop('config' , UpperCAmelCase_ )
lowerCAmelCase : int = kwargs.pop('trust_remote_code' , UpperCAmelCase_ )
lowerCAmelCase : int = True
lowerCAmelCase : str = FeatureExtractionMixin.get_feature_extractor_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = config_dict.get('feature_extractor_type' , UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = None
if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ):
lowerCAmelCase : List[str] = config_dict['auto_map']['AutoFeatureExtractor']
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase : str = AutoConfig.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ )
            # It could be in `config.feature_extractor_type`
lowerCAmelCase : Any = getattr(UpperCAmelCase_ , 'feature_extractor_type' , UpperCAmelCase_ )
if hasattr(UpperCAmelCase_ , 'auto_map' ) and "AutoFeatureExtractor" in config.auto_map:
lowerCAmelCase : Optional[int] = config.auto_map['AutoFeatureExtractor']
if feature_extractor_class is not None:
lowerCAmelCase : Optional[int] = feature_extractor_class_from_name(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = feature_extractor_auto_map is not None
lowerCAmelCase : Dict = feature_extractor_class is not None or type(UpperCAmelCase_ ) in FEATURE_EXTRACTOR_MAPPING
lowerCAmelCase : Dict = resolve_trust_remote_code(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
if has_remote_code and trust_remote_code:
lowerCAmelCase : Any = get_class_from_dynamic_module(
UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = kwargs.pop('code_revision' , UpperCAmelCase_ )
if os.path.isdir(UpperCAmelCase_ ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(UpperCAmelCase_ ) in FEATURE_EXTRACTOR_MAPPING:
lowerCAmelCase : Union[str, Any] = FEATURE_EXTRACTOR_MAPPING[type(UpperCAmelCase_ )]
return feature_extractor_class.from_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
raise ValueError(
f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}" )
@staticmethod
def lowercase__ ( UpperCAmelCase_ : str , UpperCAmelCase_ : int ):
FEATURE_EXTRACTOR_MAPPING.register(UpperCAmelCase_ , UpperCAmelCase_ )
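# from_pretrained above resolves the extractor class with a "first match
# wins" cascade: explicit `feature_extractor_type` in the extractor config,
# then an `auto_map` entry for remote code, then the model config, then the
# type-based mapping. A self-contained sketch of that cascade (the dict keys
# mirror the real config fields; everything else is illustrative):
def resolve_extractor_name(fe_config: dict, model_config: dict, mapping: dict):
    if "feature_extractor_type" in fe_config:
        return fe_config["feature_extractor_type"]
    if "AutoFeatureExtractor" in fe_config.get("auto_map", {}):
        return fe_config["auto_map"]["AutoFeatureExtractor"]
    return model_config.get("feature_extractor_type") or mapping.get(
        model_config.get("model_type")
    )

assert (
    resolve_extractor_name({}, {"model_type": "vit"}, {"vit": "ViTFeatureExtractor"})
    == "ViTFeatureExtractor"
)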
| 351
|
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
return x + 2
class __A ( unittest.TestCase ):
def lowercase__ ( self : int ):
lowerCAmelCase : List[str] = 'x = 3'
lowerCAmelCase : Optional[Any] = {}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
assert result == 3
self.assertDictEqual(UpperCAmelCase_ , {'x': 3} )
lowerCAmelCase : Dict = 'x = y'
lowerCAmelCase : List[Any] = {'y': 5}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 5, 'y': 5} )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : Any = 'y = add_two(x)'
lowerCAmelCase : int = {'x': 3}
lowerCAmelCase : Optional[int] = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 5} )
# Won't work without the tool
with CaptureStdout() as out:
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
assert result is None
assert "tried to execute add_two" in out.out
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Tuple = 'x = 3'
lowerCAmelCase : List[Any] = {}
lowerCAmelCase : Dict = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
assert result == 3
self.assertDictEqual(UpperCAmelCase_ , {'x': 3} )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : List[Any] = 'test_dict = {\'x\': x, \'y\': add_two(x)}'
lowerCAmelCase : Dict = {'x': 3}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 5} )
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'test_dict': {'x': 3, 'y': 5}} )
def lowercase__ ( self : Any ):
lowerCAmelCase : Union[str, Any] = 'x = 3\ny = 5'
lowerCAmelCase : str = {}
lowerCAmelCase : Optional[int] = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 5} )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Union[str, Any] = 'text = f\'This is x: {x}.\''
lowerCAmelCase : str = {'x': 3}
lowerCAmelCase : int = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'text': 'This is x: 3.'} )
def lowercase__ ( self : Dict ):
lowerCAmelCase : Optional[Any] = 'if x <= 3:\n y = 2\nelse:\n y = 5'
lowerCAmelCase : Dict = {'x': 3}
lowerCAmelCase : int = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 2} )
lowerCAmelCase : Any = {'x': 8}
lowerCAmelCase : Optional[int] = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 8, 'y': 5} )
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : int = 'test_list = [x, add_two(x)]'
lowerCAmelCase : Optional[Any] = {'x': 3}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , [3, 5] )
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'test_list': [3, 5]} )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : int = 'y = x'
lowerCAmelCase : Optional[int] = {'x': 3}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
assert result == 3
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 3} )
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Dict = 'test_list = [x, add_two(x)]\ntest_list[1]'
lowerCAmelCase : List[str] = {'x': 3}
lowerCAmelCase : List[str] = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'test_list': [3, 5]} )
lowerCAmelCase : Optional[Any] = 'test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'
lowerCAmelCase : List[Any] = {'x': 3}
lowerCAmelCase : Optional[Any] = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'test_dict': {'x': 3, 'y': 5}} )
def lowercase__ ( self : int ):
lowerCAmelCase : Any = 'x = 0\nfor i in range(3):\n x = i'
lowerCAmelCase : str = {}
lowerCAmelCase : Dict = evaluate(UpperCAmelCase_ , {'range': range} , state=UpperCAmelCase_ )
assert result == 2
self.assertDictEqual(UpperCAmelCase_ , {'x': 2, 'i': 2} )
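# The tests above pin down the interpreter contract: `evaluate` runs a
# restricted subset of Python against a whitelist of tools, mutates `state`
# in place, and returns the value of the last statement. A direct usage
# sketch that sticks to constructs the tests exercise (plain assignments):
from transformers.tools.python_interpreter import evaluate

state = {"x": 3}
result = evaluate("y = x\nz = y", {}, state=state)
assert result == 3 and state == {"x": 3, "y": 3, "z": 3}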
| 323
| 0
|
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__A : str = 16
__A : Dict = 32
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Dict:
'''simple docstring'''
return int(x / 2**20 )
class __A :
def __enter__( self : Dict ):
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
lowerCAmelCase : List[Any] = torch.cuda.memory_allocated()
return self
def __exit__( self : List[Any] , *UpperCAmelCase_ : str ):
gc.collect()
torch.cuda.empty_cache()
lowerCAmelCase : Union[str, Any] = torch.cuda.memory_allocated()
lowerCAmelCase : Optional[Any] = torch.cuda.max_memory_allocated()
lowerCAmelCase : Optional[int] = bamb(self.end - self.begin )
lowerCAmelCase : Tuple = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
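# The context manager above snapshots torch.cuda.memory_allocated() on entry
# and converts the delta and the peak (max_memory_allocated) to MiB via the
# bytes-to-MiB helper (x / 2**20). The same measurement written out
# self-contained, since the class attributes above use placeholder names:
import torch

if torch.cuda.is_available():
    torch.cuda.reset_max_memory_allocated()
    begin = torch.cuda.memory_allocated()
    buf = torch.empty(256, 1024, 1024, device="cuda")  # ~1 GiB of fp32
    del buf
    used_mb = int((torch.cuda.memory_allocated() - begin) / 2**20)
    peak_mb = int((torch.cuda.max_memory_allocated() - begin) / 2**20)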
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase = 16, _UpperCAmelCase = "bert-base-cased", _UpperCAmelCase = 320, _UpperCAmelCase = 160, ) -> List[str]:
'''simple docstring'''
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(_UpperCAmelCase )
lowerCAmelCase : Dict = load_dataset(
'glue', 'mrpc', split={'train': f"train[:{n_train}]", 'validation': f"validation[:{n_val}]"} )
def tokenize_function(_UpperCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase : Tuple = tokenizer(examples['sentence1'], examples['sentence2'], truncation=_UpperCAmelCase, max_length=_UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCAmelCase : Optional[Any] = datasets.map(
_UpperCAmelCase, batched=_UpperCAmelCase, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=_UpperCAmelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase : Tuple = tokenized_datasets.rename_column('label', 'labels' )
def collate_fn(_UpperCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_UpperCAmelCase, padding='max_length', max_length=128, return_tensors='pt' )
return tokenizer.pad(_UpperCAmelCase, padding='longest', return_tensors='pt' )
# Instantiate dataloaders.
lowerCAmelCase : Union[str, Any] = DataLoader(
tokenized_datasets['train'], shuffle=_UpperCAmelCase, collate_fn=_UpperCAmelCase, batch_size=_UpperCAmelCase )
lowerCAmelCase : Union[str, Any] = DataLoader(
tokenized_datasets['validation'], shuffle=_UpperCAmelCase, collate_fn=_UpperCAmelCase, batch_size=_UpperCAmelCase )
return train_dataloader, eval_dataloader
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> Dict:
'''simple docstring'''
lowerCAmelCase : Optional[int] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase : Dict = config['lr']
lowerCAmelCase : List[str] = int(config['num_epochs'] )
lowerCAmelCase : Tuple = int(config['seed'] )
lowerCAmelCase : Optional[Any] = int(config['batch_size'] )
lowerCAmelCase : Optional[Any] = args.model_name_or_path
set_seed(_UpperCAmelCase )
lowerCAmelCase : str = get_dataloaders(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, args.n_train, args.n_val )
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
lowerCAmelCase : List[Any] = AutoModelForSequenceClassification.from_pretrained(_UpperCAmelCase, return_dict=_UpperCAmelCase )
# Instantiate optimizer
lowerCAmelCase : int = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCAmelCase : Optional[Any] = optimizer_cls(params=model.parameters(), lr=_UpperCAmelCase )
if accelerator.state.deepspeed_plugin is not None:
lowerCAmelCase : Union[str, Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
lowerCAmelCase : str = 1
lowerCAmelCase : List[str] = (len(_UpperCAmelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCAmelCase : Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=_UpperCAmelCase, num_warmup_steps=0, num_training_steps=_UpperCAmelCase, )
else:
lowerCAmelCase : Dict = DummyScheduler(_UpperCAmelCase, total_num_steps=_UpperCAmelCase, warmup_num_steps=0 )
# Prepare everything
    # There is no specific order to remember; we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase : Optional[Any] = accelerator.prepare(
_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
# We need to keep track of how many total steps we have iterated over
lowerCAmelCase : Any = 0
    # We also need to keep track of the starting epoch so files are named properly
lowerCAmelCase : Optional[Any] = 0
# Now we train the model
lowerCAmelCase : int = {}
for epoch in range(_UpperCAmelCase, _UpperCAmelCase ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(_UpperCAmelCase ):
lowerCAmelCase : Union[str, Any] = model(**_UpperCAmelCase )
lowerCAmelCase : Union[str, Any] = outputs.loss
lowerCAmelCase : int = loss / gradient_accumulation_steps
accelerator.backward(_UpperCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) )
accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) )
accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) )
accelerator.print(
'Total Peak Memory consumed during the train (max): {}'.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
lowerCAmelCase : Optional[int] = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir, 'peak_memory_utilization.json' ), 'w' ) as f:
json.dump(_UpperCAmelCase, _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
'''simple docstring'''
lowerCAmelCase : int = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path', type=_UpperCAmelCase, default='bert-base-cased', help='Path to pretrained model or model identifier from huggingface.co/models.', required=_UpperCAmelCase, )
parser.add_argument(
'--output_dir', type=_UpperCAmelCase, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.', )
parser.add_argument(
'--peak_memory_upper_bound', type=_UpperCAmelCase, default=_UpperCAmelCase, help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.', )
parser.add_argument(
'--n_train', type=_UpperCAmelCase, default=320, help='Number of training examples to use.', )
parser.add_argument(
'--n_val', type=_UpperCAmelCase, default=160, help='Number of validation examples to use.', )
parser.add_argument(
'--num_epochs', type=_UpperCAmelCase, default=1, help='Number of train epochs.', )
lowerCAmelCase : str = parser.parse_args()
lowerCAmelCase : str = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(_UpperCAmelCase, _UpperCAmelCase )
if __name__ == "__main__":
main()
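# The loop above divides the loss by gradient_accumulation_steps and only
# steps the optimizer on accumulation boundaries, so gradients from several
# micro-batches add up before each update. A minimal self-contained sketch
# of that pattern in plain PyTorch (illustrative tensors, not the MRPC
# pipeline above):
import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
accum_steps = 4
for step in range(8):
    x, y = torch.randn(2, 4), torch.randn(2, 1)
    loss = torch.nn.functional.mse_loss(model(x), y) / accum_steps
    loss.backward()  # gradients accumulate across micro-batches
    if (step + 1) % accum_steps == 0:  # one update per accum_steps batches
        optimizer.step()
        optimizer.zero_grad()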
| 352
|
from math import pi, sqrt, tan
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
lowerCAmelCase : Optional[int] = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
return 4 * pow(_UpperCAmelCase, 2 ) * torus_radius * tube_radius
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('Given three sides do not form a triangle' )
lowerCAmelCase : Optional[Any] = (sidea + sidea + sidea) / 2
lowerCAmelCase : Any = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if basea < 0 or basea < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (basea + basea) * height
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_a * diagonal_a
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if not isinstance(_UpperCAmelCase, _UpperCAmelCase ) or sides < 3:
raise ValueError(
'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
elif length < 0:
raise ValueError(
'area_reg_polygon() only accepts non-negative values as \
length of a side' )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(F'Rectangle: {area_rectangle(10, 20) = }')
print(F'Square: {area_square(10) = }')
print(F'Triangle: {area_triangle(10, 10) = }')
print(F'Triangle: {area_triangle_three_sides(5, 12, 13) = }')
print(F'Parallelogram: {area_parallelogram(10, 20) = }')
print(F'Rhombus: {area_rhombus(10, 20) = }')
print(F'Trapezium: {area_trapezium(10, 20, 30) = }')
print(F'Circle: {area_circle(20) = }')
print(F'Ellipse: {area_ellipse(10, 20) = }')
print('''\nSurface Areas of various geometric shapes: \n''')
print(F'Cube: {surface_area_cube(20) = }')
print(F'Cuboid: {surface_area_cuboid(10, 20, 30) = }')
print(F'Sphere: {surface_area_sphere(20) = }')
print(F'Hemisphere: {surface_area_hemisphere(20) = }')
print(F'Cone: {surface_area_cone(10, 20) = }')
print(F'Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }')
print(F'Cylinder: {surface_area_cylinder(10, 20) = }')
print(F'Torus: {surface_area_torus(20, 10) = }')
print(F'Equilateral Triangle: {area_reg_polygon(3, 10) = }')
print(F'Square: {area_reg_polygon(4, 10) = }')
    print(F'Regular Pentagon: {area_reg_polygon(5, 10) = }')
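# A quick sanity check of Heron's formula for the 5-12-13 right triangle in
# the demo above: s = (5 + 12 + 13) / 2 = 15 and sqrt(15 * 10 * 3 * 2) = 30,
# matching the right-triangle formula (5 * 12) / 2. Written self-contained,
# since the parameter names in the module above are obfuscated:
from math import sqrt
s = (5 + 12 + 13) / 2
assert sqrt(s * (s - 5) * (s - 12) * (s - 13)) == 30.0 == (5 * 12) / 2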
| 323
| 0
|
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __A ( unittest.TestCase ):
def lowercase__ ( self : Any ):
lowerCAmelCase : int = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = -1
lowerCAmelCase : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = model.generate(UpperCAmelCase_ , max_new_tokens=10 , do_sample=UpperCAmelCase_ )
lowerCAmelCase : Any = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowerCAmelCase : Any = TextStreamer(UpperCAmelCase_ )
model.generate(UpperCAmelCase_ , max_new_tokens=10 , do_sample=UpperCAmelCase_ , streamer=UpperCAmelCase_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCAmelCase : str = cs.out[:-1]
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = -1
lowerCAmelCase : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase_ )
lowerCAmelCase : Any = model.generate(UpperCAmelCase_ , max_new_tokens=10 , do_sample=UpperCAmelCase_ )
lowerCAmelCase : int = tokenizer.decode(greedy_ids[0] )
lowerCAmelCase : List[str] = TextIteratorStreamer(UpperCAmelCase_ )
lowerCAmelCase : Any = {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
lowerCAmelCase : Dict = Thread(target=model.generate , kwargs=UpperCAmelCase_ )
thread.start()
lowerCAmelCase : Any = ''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : Dict ):
lowerCAmelCase : int = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCAmelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(UpperCAmelCase_ )
lowerCAmelCase : str = -1
lowerCAmelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase_ )
lowerCAmelCase : Tuple = model.generate(UpperCAmelCase_ , max_new_tokens=10 , do_sample=UpperCAmelCase_ )
lowerCAmelCase : int = greedy_ids[:, input_ids.shape[1] :]
lowerCAmelCase : str = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowerCAmelCase : Union[str, Any] = TextStreamer(UpperCAmelCase_ , skip_prompt=UpperCAmelCase_ )
model.generate(UpperCAmelCase_ , max_new_tokens=10 , do_sample=UpperCAmelCase_ , streamer=UpperCAmelCase_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCAmelCase : Any = cs.out[:-1]
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : Any ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowerCAmelCase : str = AutoTokenizer.from_pretrained('distilgpt2' )
lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained('distilgpt2' ).to(UpperCAmelCase_ )
lowerCAmelCase : str = -1
lowerCAmelCase : List[Any] = torch.ones((1, 5) , device=UpperCAmelCase_ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowerCAmelCase : Union[str, Any] = TextStreamer(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
model.generate(UpperCAmelCase_ , max_new_tokens=1 , do_sample=UpperCAmelCase_ , streamer=UpperCAmelCase_ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowerCAmelCase : Optional[Any] = cs.out[:-1] # Remove the final "\n"
lowerCAmelCase : Any = tokenizer(UpperCAmelCase_ , return_tensors='pt' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def lowercase__ ( self : Any ):
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(UpperCAmelCase_ )
lowerCAmelCase : Dict = -1
lowerCAmelCase : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase_ )
lowerCAmelCase : Tuple = TextIteratorStreamer(UpperCAmelCase_ , timeout=0.0_01 )
lowerCAmelCase : Tuple = {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
lowerCAmelCase : Dict = Thread(target=model.generate , kwargs=UpperCAmelCase_ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(UpperCAmelCase_ ):
lowerCAmelCase : Union[str, Any] = ''
for new_text in streamer:
streamer_text += new_text
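# Outside a test harness, TextIteratorStreamer decouples generation (run in
# a background thread) from consumption: iterating the streamer yields
# decoded text chunks until generation finishes. A usage sketch with the
# same tiny checkpoint the tests use:
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
streamer = TextIteratorStreamer(tok)
inputs = tok("Hello", return_tensors="pt")
generation_kwargs = {**inputs, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
Thread(target=model.generate, kwargs=generation_kwargs).start()
text = "".join(chunk for chunk in streamer)  # blocks until generation ends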
| 353
|
from __future__ import annotations
from typing import Any
class __A :
def __init__( self : Optional[Any] , UpperCAmelCase_ : int ):
lowerCAmelCase : Tuple = num_of_nodes
lowerCAmelCase : list[list[int]] = []
lowerCAmelCase : dict[int, int] = {}
def lowercase__ ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
self.m_edges.append([u_node, v_node, weight] )
def lowercase__ ( self : Dict , UpperCAmelCase_ : int ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def lowercase__ ( self : Optional[int] , UpperCAmelCase_ : int ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
lowerCAmelCase : Dict = self.find_component(UpperCAmelCase_ )
def lowercase__ ( self : List[str] , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
if component_size[u_node] <= component_size[v_node]:
lowerCAmelCase : Optional[int] = v_node
component_size[v_node] += component_size[u_node]
self.set_component(UpperCAmelCase_ )
elif component_size[u_node] >= component_size[v_node]:
lowerCAmelCase : Union[str, Any] = self.find_component(UpperCAmelCase_ )
component_size[u_node] += component_size[v_node]
self.set_component(UpperCAmelCase_ )
def lowercase__ ( self : str ):
lowerCAmelCase : str = []
lowerCAmelCase : Tuple = 0
lowerCAmelCase : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
lowerCAmelCase : int = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Union[str, Any] = edge
lowerCAmelCase : Optional[int] = self.m_component[u]
lowerCAmelCase : str = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
lowerCAmelCase : str = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Any = edge
lowerCAmelCase : Optional[Any] = self.m_component[u]
lowerCAmelCase : Optional[Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n" )
num_of_components -= 1
lowerCAmelCase : Optional[Any] = [-1] * self.m_num_of_nodes
print(f"The total weight of the minimal spanning tree is: {mst_weight}" )
def SCREAMING_SNAKE_CASE__ ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
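# The class above is Boruvka's algorithm: each round, every component picks
# its cheapest outgoing edge; merges at least halve the component count, so
# it finishes in O(E log V). A self-contained mini version of the same
# rounds (the class identifiers above are obfuscated), assuming a connected
# graph:
def boruvka_mst_weight(n: int, edges: list[tuple[int, int, int]]) -> int:
    comp = list(range(n))  # parent pointers for a simple union-find

    def find(x: int) -> int:
        while comp[x] != x:
            x = comp[x]
        return x

    total, components = 0, n
    while components > 1:
        cheapest: list = [None] * n
        for u, v, w in edges:
            ru, rv = find(u), find(v)
            if ru != rv:
                for r in (ru, rv):
                    if cheapest[r] is None or cheapest[r][2] > w:
                        cheapest[r] = (u, v, w)
        for e in cheapest:
            if e is None:
                continue
            u, v, w = e
            ru, rv = find(u), find(v)
            if ru != rv:  # re-check: an earlier merge may have joined them
                comp[ru] = rv
                total += w
                components -= 1
    return total

assert boruvka_mst_weight(3, [(0, 1, 1), (1, 2, 2), (0, 2, 3)]) == 3  # edges 1 + 2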
| 323
| 0
|
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
__A : Any = ['''text''', '''image''', '''audio''']
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase : Optional[Any] = []
for input_type in input_types:
if input_type == "text":
inputs.append('Text input' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3_000 ) )
elif isinstance(_UpperCAmelCase, _UpperCAmelCase ):
inputs.append(create_inputs(_UpperCAmelCase ) )
else:
raise ValueError(f"Invalid type requested: {input_type}" )
return inputs
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowerCAmelCase : int = []
for output in outputs:
if isinstance(_UpperCAmelCase, (str, AgentText) ):
output_types.append('text' )
elif isinstance(_UpperCAmelCase, (Image.Image, AgentImage) ):
output_types.append('image' )
elif isinstance(_UpperCAmelCase, (torch.Tensor, AgentAudio) ):
output_types.append('audio' )
else:
raise ValueError(f"Invalid output: {output}" )
return output_types
@is_tool_test
class __A :
def lowercase__ ( self : Tuple ):
self.assertTrue(hasattr(self.tool , 'inputs' ) )
self.assertTrue(hasattr(self.tool , 'outputs' ) )
lowerCAmelCase : str = self.tool.inputs
for _input in inputs:
if isinstance(_input , UpperCAmelCase_ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
lowerCAmelCase : int = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : List[str] = create_inputs(self.tool.inputs )
lowerCAmelCase : Optional[int] = self.tool(*UpperCAmelCase_ )
# There is a single output
if len(self.tool.outputs ) == 1:
lowerCAmelCase : Tuple = [outputs]
self.assertListEqual(output_types(UpperCAmelCase_ ) , self.tool.outputs )
def lowercase__ ( self : int ):
self.assertTrue(hasattr(self.tool , 'description' ) )
self.assertTrue(hasattr(self.tool , 'default_checkpoint' ) )
self.assertTrue(self.tool.description.startswith('This is a tool that' ) )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : Optional[int] = create_inputs(self.tool.inputs )
lowerCAmelCase : List[str] = self.tool(*UpperCAmelCase_ )
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase : List[Any] = [outputs]
self.assertEqual(len(UpperCAmelCase_ ) , len(self.tool.outputs ) )
for output, output_type in zip(UpperCAmelCase_ , self.tool.outputs ):
lowerCAmelCase : Dict = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) )
def lowercase__ ( self : List[str] ):
lowerCAmelCase : int = create_inputs(self.tool.inputs )
lowerCAmelCase : Union[str, Any] = []
for _input, input_type in zip(UpperCAmelCase_ , self.tool.inputs ):
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
lowerCAmelCase : List[str] = self.tool(*UpperCAmelCase_ )
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase : int = [outputs]
self.assertEqual(len(UpperCAmelCase_ ) , len(self.tool.outputs ) )
| 354
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : List[Any] = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[str] = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
__A : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 323
| 0
|
__A : str = '''Tobias Carryer'''
from time import time
class __A :
def __init__( self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any]=int(time() ) ): # noqa: B008
lowerCAmelCase : Optional[int] = multiplier
lowerCAmelCase : Dict = increment
lowerCAmelCase : Optional[Any] = modulo
lowerCAmelCase : Optional[int] = seed
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : List[str] = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
__A : Optional[Any] = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31)
while True:
print(lcg.next_number())
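# The generator implements the LCG recurrence X' = (a*X + c) % m with
# a = 1664525, c = 1013904223 and m = 2 << 31 (i.e. 2**32), the classic
# Numerical Recipes constants. One hand-checked step from seed 1:
#     (1664525 * 1 + 1013904223) % 2**32 == 1015568748
assert (1664525 * 1 + 1013904223) % (2 << 31) == 1015568748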
| 355
|
import math
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase = 100 ) -> int:
'''simple docstring'''
lowerCAmelCase : Any = sum(i * i for i in range(1, n + 1 ) )
lowerCAmelCase : str = int(math.pow(sum(range(1, n + 1 ) ), 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'{solution() = }')
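# The same difference has a closed form via Faulhaber's formulas:
# sum i = n(n+1)/2 and sum i**2 = n(n+1)(2n+1)/6. Worked out for n = 100:
n = 100
square_of_sum = (n * (n + 1) // 2) ** 2          # 5050**2 = 25_502_500
sum_of_squares = n * (n + 1) * (2 * n + 1) // 6  # 338_350
assert square_of_sum - sum_of_squares == 25_164_150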
| 323
| 0
|
from collections import defaultdict
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> bool:
'''simple docstring'''
lowerCAmelCase : Any = first_str.lower().strip()
lowerCAmelCase : List[str] = second_str.lower().strip()
# Remove whitespace
lowerCAmelCase : Optional[Any] = first_str.replace(' ', '' )
lowerCAmelCase : str = second_str.replace(' ', '' )
# Strings of different lengths are not anagrams
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
return False
# Default values for count should be 0
lowerCAmelCase : defaultdict[str, int] = defaultdict(_UpperCAmelCase )
    # For each character in the input strings,
    # increment the count in the corresponding dictionary entry
for i in range(len(_UpperCAmelCase ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
__A : Any = input('''Enter the first string ''').strip()
__A : Union[str, Any] = input('''Enter the second string ''').strip()
__A : Any = check_anagrams(input_a, input_b)
print(F'{input_a} and {input_b} are {"" if status else "not "}anagrams.')
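# The defaultdict balance above is equivalent to comparing character
# multisets; a compact alternative (not the implementation above) uses
# collections.Counter:
from collections import Counter

def check_anagrams_counter(a: str, b: str) -> bool:
    return Counter(a.lower().replace(" ", "")) == Counter(b.lower().replace(" ", ""))

assert check_anagrams_counter("Silent", "Listen")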
| 356
|
from collections.abc import Sequence
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
return sum(c * (x**i) for i, c in enumerate(_UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
lowerCAmelCase : Optional[int] = 0.0
for coeff in reversed(_UpperCAmelCase ):
lowerCAmelCase : Union[str, Any] = result * x + coeff
return result
if __name__ == "__main__":
__A : Optional[int] = (0.0, 0.0, 5.0, 9.3, 7.0)
__A : str = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
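# Hand-unrolled Horner evaluation for poly = (0.0, 0.0, 5.0, 9.3, 7.0) at
# x = 10.0: (((7.0*x + 9.3)*x + 5.0)*x + 0.0)*x + 0.0, i.e. n multiplies and
# n adds instead of computing each power separately. Self-contained check
# (the function parameters above are obfuscated):
from math import isclose

coeffs, x = (0.0, 0.0, 5.0, 9.3, 7.0), 10.0
result = 0.0
for c in reversed(coeffs):
    result = result * x + c
assert isclose(result, 79800.0)  # 5*10**2 + 9.3*10**3 + 7*10**4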
| 323
| 0
|
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 357
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __A ( unittest.TestCase ):
def __init__( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int]=7 , UpperCAmelCase_ : Tuple=3 , UpperCAmelCase_ : int=18 , UpperCAmelCase_ : List[str]=30 , UpperCAmelCase_ : str=400 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Union[str, Any]=None , ):
lowerCAmelCase : Any = size if size is not None else {'shortest_edge': 20}
lowerCAmelCase : str = crop_size if crop_size is not None else {'height': 18, 'width': 18}
lowerCAmelCase : List[Any] = parent
lowerCAmelCase : Optional[Any] = batch_size
lowerCAmelCase : int = num_channels
lowerCAmelCase : int = image_size
lowerCAmelCase : Tuple = min_resolution
lowerCAmelCase : Any = max_resolution
lowerCAmelCase : int = do_resize
lowerCAmelCase : Dict = size
lowerCAmelCase : int = do_center_crop
lowerCAmelCase : str = crop_size
def lowercase__ ( self : Optional[int] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __A ( lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : Optional[Any] = MobileNetVaImageProcessor if is_vision_available() else None
def lowercase__ ( self : int ):
lowerCAmelCase : List[str] = MobileNetVaImageProcessingTester(self )
@property
def lowercase__ ( self : int ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Dict ):
lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ , 'do_resize' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'size' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'do_center_crop' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'crop_size' ) )
def lowercase__ ( self : int ):
lowerCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
lowerCAmelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def lowercase__ ( self : str ):
pass
def lowercase__ ( self : List[str] ):
# Initialize image_processing
lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image )
# Test not batched input
lowerCAmelCase : str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase : Dict = image_processing(UpperCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowercase__ ( self : Optional[Any] ):
# Initialize image_processing
lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
# Test not batched input
lowerCAmelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase : Optional[int] = image_processing(UpperCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowercase__ ( self : Dict ):
# Initialize image_processing
lowerCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
# Test not batched input
lowerCAmelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase : List[str] = image_processing(UpperCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 323
| 0
|
__A : Dict = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
__A : List[Any] = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
__A : Dict = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
__A : Optional[int] = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
__A : Optional[int] = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
__A : Tuple = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
__A : int = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
__A : Optional[Any] = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 358
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase=False ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('module.cls_token', 'vit.embeddings.cls_token'),
('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('module.pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('module.norm.weight', 'layernorm.weight'),
('module.norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCAmelCase : Any = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=False ) -> Dict:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
lowerCAmelCase : Optional[Any] = ''
else:
lowerCAmelCase : Optional[Any] = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase : Union[str, Any] = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight" )
lowerCAmelCase : List[Any] = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase : str = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase : int = in_proj_bias[: config.hidden_size]
lowerCAmelCase : Dict = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase : Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase : str = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase : Any = in_proj_bias[-config.hidden_size :]
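# In timm checkpoints the attention projections are stored as one fused
# `qkv` matrix of shape (3 * hidden, hidden); the function above slices it
# into equal thirds for query, key and value. A minimal sketch with
# synthetic weights (hidden_size = 4 here, not the real checkpoint):
import torch

hidden = 4
in_proj_weight = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
q_w = in_proj_weight[:hidden, :]              # first third  -> query
k_w = in_proj_weight[hidden : 2 * hidden, :]  # middle third -> key
v_w = in_proj_weight[-hidden:, :]             # last third   -> value
assert q_w.shape == k_w.shape == v_w.shape == (hidden, hidden)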
def remove_classification_head_(state_dict):
    """Drop the (unused) classification head weights from the checkpoint, in place."""
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None )
def remove_projection_head(state_dict):
    """Drop the MSN projection-head (module.fc.*) weights from the checkpoint, in place."""
    ignore_keys = [
        'module.fc.fc1.weight',
        'module.fc.fc1.bias',
        'module.fc.bn1.weight',
        'module.fc.bn1.bias',
        'module.fc.bn1.running_mean',
        'module.fc.bn1.running_var',
        'module.fc.bn1.num_batches_tracked',
        'module.fc.fc2.weight',
        'module.fc.fc2.bias',
        'module.fc.bn2.weight',
        'module.fc.bn2.bias',
        'module.fc.bn2.running_mean',
        'module.fc.bn2.running_var',
        'module.fc.bn2.num_batches_tracked',
        'module.fc.fc3.weight',
        'module.fc.fc3.bias',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None )
def rename_key(dct, old, new):
    """Move `dct[old]` to `dct[new]`, in place."""
    val = dct.pop(old )
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Download an MSN checkpoint, remap its weights onto a HF ViTMSNModel and verify the output."""
    config = ViTMSNConfig()
    config.num_labels = 1_000
    repo_id = 'datasets/huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename ), 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu' )['target_encoder']
    image_processor = ViTImageProcessor(size=config.image_size )
    remove_projection_head(state_dict )
    rename_keys = create_rename_keys(config, base_model=True )
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest )
    read_in_q_k_v(state_dict, config, base_model=True )
    model.load_state_dict(state_dict )
    model.eval()
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True ).raw )
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD )
    inputs = image_processor(images=image, return_tensors='pt' )
    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    last_hidden_state = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowerCAmelCase : Optional[int] = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
elif "b16" in checkpoint_url:
lowerCAmelCase : int = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
elif "l16" in checkpoint_url:
lowerCAmelCase : Union[str, Any] = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
elif "b4" in checkpoint_url:
lowerCAmelCase : int = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
else:
lowerCAmelCase : Union[str, Any] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3], _UpperCAmelCase, atol=1e-4 )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(_UpperCAmelCase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
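# Once converted, the dump folder behaves like any Hub checkpoint (sketch; the
# path below stands in for whatever was passed as --pytorch_dump_folder_path):
#
#     model = ViTMSNModel.from_pretrained('./vit_msn_dump')
#     processor = ViTImageProcessor.from_pretrained('./vit_msn_dump')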
| 323
| 0
|
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, input_values, labels
    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {'input_values': input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False
    def setUp(self):
        self.model_tester = ASTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ASTConfig , has_text_modality=False , hidden_size=37 )
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='AST does not use inputs_embeds' )
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['input_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_audio():
    """Download a sample audio clip from the Hub and load it with torchaudio."""
    filepath = hf_hub_download(
        repo_id='nielsr/audio-spectogram-transformer-checkpoint', filename='sample_audio.flac', repo_type='dataset' )
    audio, sampling_rate = torchaudio.load(filepath )
    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' )
            if is_torchaudio_available()
            else None
        )
    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' ).to(torch_device )
        feature_extractor = self.default_feature_extractor
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio , sampling_rate=sampling_rate , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 527) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.87_60, -7.00_42, -8.66_02] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 359
|
def hamming_distance(string1: str, string2: str) -> int:
    """Calculate the Hamming distance between two equal-length strings.

    >>> hamming_distance("python", "python")
    0
    >>> hamming_distance("karolin", "kathrin")
    3
    """
    if len(string1 ) != len(string2 ):
        raise ValueError('String lengths must match!' )
    count = 0
    for char1, char2 in zip(string1, string2 ):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 323
| 0
|
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedily pick the largest denominations first to make change for `value`."""
    total_value = int(value )
    # Initialize Result
    answer = []
    # Traverse through all denomination
    for denomination in reversed(denominations ):
        # Find denominations
        while int(total_value ) >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination )  # Append the "answers" array
    return answer
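# A small worked example (added): 93 = 50 + 20 + 20 + 2 + 1 with the default
# Indian denominations, largest coins first.
assert find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "93") == [50, 20, 20, 2, 1]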
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = '''0'''
    if (
        input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
        == "y"
    ):
        n = int(input('''Enter the number of denominations you want to add: ''').strip())
        for i in range(0, n):
            denominations.append(int(input(F'Denomination {i}: ').strip()))
        value = input('''Enter the change you want to make in Indian Currency: ''').strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input('''Enter the change you want to make: ''').strip()
    if int(value) == 0 or int(value) < 0:
        print('''The total value cannot be zero or negative.''')
    else:
        print(F'Following is minimal change for {value}: ')
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=''' ''')
| 360
|
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=384,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=128,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = '''temp_engine/bert-fp32.engine'''
if args.fp16:
    engine_name = '''temp_engine/bert-fp16.engine'''
if args.int8:
    engine_name = '''temp_engine/bert-int8.engine'''
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
    # Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
        # serialize_engine and store in file (can be directly loaded and deserialized):
        with open(engine_name, '''wb''') as f:
            f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    """Run one TensorRT inference pass and time it (host<->device copies included)."""
    input_ids = np.asarray(inputs['input_ids'], dtype=np.int32 )
    attention_mask = np.asarray(inputs['attention_mask'], dtype=np.int32 )
    token_type_ids = np.asarray(inputs['token_type_ids'], dtype=np.int32 )
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream )
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream )
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream )
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp ) for d_inp in d_inputs] + [int(d_output0 ), int(d_output1 )], stream_handle=stream.handle )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream )
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream )
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets['''validation'''].column_names
question_column_name = '''question''' if '''question''' in column_names else column_names[0]
context_column_name = '''context''' if '''context''' in column_names else column_names[1]
answer_column_name = '''answers''' if '''answers''' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == '''right'''
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some questions have lots of whitespace on the left, which is not useful
    # and can make truncation of the context fail, so we strip it.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation='only_second' if pad_on_right else 'only_first',
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding='max_length', )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop('overflow_to_sample_mapping' )
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples['example_id'] = []
    for i in range(len(tokenized_examples['input_ids'] ) ):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i )
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples['id'][sample_index] )
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples['offset_mapping'][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples['offset_mapping'][i] )
        ]
    return tokenized_examples
eval_examples = raw_datasets['''validation''']
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='''Running tokenizer on validation dataset''',
)
data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(['''example_id''', '''offset_mapping'''])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples, features=features, predictions=predictions, version_2_with_negative=args.version_2_with_negative, n_best_size=args.n_best_size, max_answer_length=args.max_answer_length, null_score_diff_threshold=args.null_score_diff_threshold, output_dir=args.output_dir, prefix=stage, )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]
    references = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references )
metric = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        """Byte size of one engine binding (shape volume times element size)."""
        return trt.volume(engine.get_binding_shape(binding ) ) * engine.get_binding_dtype(binding ).itemsize
    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(F' Num examples = {len(eval_dataset)}')
logger.info(F' Batch size = {args.per_device_eval_batch_size}')
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 1000 / niter))
logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 1000))
logger.info('''Total Number of Inference = %d''', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F'Evaluation metrics: {eval_metric}')
| 323
| 0
|
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
__A : Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args , **kwargs )
        requires_backends(self , 'vision' )
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING )
    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params['prompt'] = prompt
        if generate_kwargs is not None:
            forward_kwargs['generate_kwargs'] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs['generate_kwargs'] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    '\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'
                    ' please use only one' )
            forward_kwargs['generate_kwargs']['max_new_tokens'] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images , **kwargs )
    def preprocess(self, image, prompt=None):
        image = load_image(image )
        if prompt is not None:
            if not isinstance(prompt , str ):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt )} - but expected a single string. "
                    'Note also that one single text can be provided for conditional image to text generation.' )
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                input_ids = self.tokenizer(text=prompt , add_special_tokens=False ).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids ).unsqueeze(0 )
                model_inputs.update({'input_ids': input_ids} )
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image , header_text=prompt , return_tensors=self.framework )
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                text_inputs = self.tokenizer(prompt , return_tensors=self.framework )
                model_inputs.update(text_inputs )
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation" )
        else:
            model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs['input_ids'] = None
        return model_inputs
    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs['input_ids'] , list )
            and all(x is None for x in model_inputs['input_ids'] )
        ):
            model_inputs['input_ids'] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name )
        model_outputs = self.model.generate(inputs , **model_inputs , **generate_kwargs )
        return model_outputs
    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                'generated_text': self.tokenizer.decode(
                    output_ids , skip_special_tokens=True , )
            }
            records.append(record )
        return records
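# Usage sketch (added; requires the vision extras and a checkpoint download):
#
#     from transformers import pipeline
#     captioner = pipeline('image-to-text', model='ydshieh/vit-gpt2-coco-en')
#     captioner('http://images.cocodataset.org/val2017/000000039769.jpg')
#     # -> [{'generated_text': '...'}]  (exact caption depends on the checkpoint)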
| 361
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A : Any = logging.get_logger(__name__)
__A : Union[str, Any] = {
'''shi-labs/dinat-mini-in1k-224''': '''https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json''',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs )
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ['stem'] + [f"stage{idx}" for idx in range(1 , len(depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
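# Example (added for illustration): with the defaults above, the channel width
# doubles after each of the four stages, so hidden_size = 64 * 2**3 = 512.
#
#     config = DinatConfig()
#     assert config.hidden_size == 512
#     assert config.stage_names[:2] == ['stem', 'stage1']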
| 323
| 0
|
from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar('''_T''')
class QueueByTwoStacks(Generic[_T]):  # class name reconstructed: a FIFO queue built from two LIFO stacks
    def __init__(self, iterable: Iterable[_T] | None = None):
        self._stack1: list[_T] = list(iterable or [] )
        self._stack2: list[_T] = []
    def __len__(self):
        return len(self._stack1 ) + len(self._stack2 )
    def __repr__(self):
        return f"Queue({tuple(self._stack2[::-1] + self._stack1 )})"
    def put(self, item: _T) -> None:
        self._stack1.append(item )
    def get(self) -> _T:
        # Direct references to the bound methods for speed inside the transfer loop.
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop() )
        if not self._stack2:
            raise IndexError('Queue is empty' )
        return self._stack2.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
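# Usage sketch (added): FIFO order is preserved across the two internal stacks.
#
#     q = QueueByTwoStacks([1, 2, 3])
#     q.put(4)
#     [q.get() for _ in range(4)]  # -> [1, 2, 3, 4]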
| 362
|
from manim import *
class CheckpointedInference(Scene):  # hypothetical name; the original class name was mangled
    def construct(self):
lowerCAmelCase : Dict = Rectangle(height=0.5 , width=0.5 )
lowerCAmelCase : Any = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCAmelCase : List[str] = Rectangle(height=0.25 , width=0.25 )
lowerCAmelCase : List[Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase : Tuple = [mem.copy() for i in range(6 )]
lowerCAmelCase : int = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : Dict = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : int = VGroup(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : str = Text('CPU' , font_size=24 )
lowerCAmelCase : Union[str, Any] = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCAmelCase_ )
lowerCAmelCase : int = [mem.copy() for i in range(4 )]
lowerCAmelCase : Union[str, Any] = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : int = Text('GPU' , font_size=24 )
lowerCAmelCase : Tuple = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
gpu.move_to([-1, -1, 0] )
self.add(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase : Tuple = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : List[str] = Text('Model' , font_size=24 )
lowerCAmelCase : Union[str, Any] = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
model.move_to([3, -1.0, 0] )
self.add(UpperCAmelCase_ )
lowerCAmelCase : Any = []
lowerCAmelCase : Dict = []
for i, rect in enumerate(UpperCAmelCase_ ):
lowerCAmelCase : Optional[Any] = fill.copy().set_fill(UpperCAmelCase_ , opacity=0.8 )
target.move_to(UpperCAmelCase_ )
model_arr.append(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(UpperCAmelCase_ )
self.add(*UpperCAmelCase_ , *UpperCAmelCase_ )
lowerCAmelCase : Dict = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase : Union[str, Any] = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase : Tuple = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : int = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : Tuple = VGroup(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : Union[str, Any] = Text('Disk' , font_size=24 )
lowerCAmelCase : Optional[Any] = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
disk.move_to([-4, -1.25, 0] )
self.add(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : List[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase : Optional[int] = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : Dict = MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(UpperCAmelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(UpperCAmelCase_ )
lowerCAmelCase : str = MarkupText(
f"Now watch as an input is passed through the model\nand how the memory is utilized and handled." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase_ ) )
lowerCAmelCase : Optional[Any] = Square(0.3 )
input.set_fill(UpperCAmelCase_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , UpperCAmelCase_ , buff=0.5 )
self.play(Write(UpperCAmelCase_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=UpperCAmelCase_ , buff=0.02 )
self.play(MoveToTarget(UpperCAmelCase_ ) )
self.play(FadeOut(UpperCAmelCase_ ) )
lowerCAmelCase : List[Any] = Arrow(start=UpperCAmelCase_ , end=UpperCAmelCase_ , color=UpperCAmelCase_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , UpperCAmelCase_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
lowerCAmelCase : int = MarkupText(
f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase_ , run_time=3 ) )
lowerCAmelCase : Optional[Any] = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(UpperCAmelCase_ ) , Circumscribe(model_arr[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(model_cpu_arr[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
lowerCAmelCase : Any = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , UpperCAmelCase_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
lowerCAmelCase : int = AnimationGroup(
FadeOut(UpperCAmelCase_ , run_time=0.5 ) , MoveToTarget(UpperCAmelCase_ , run_time=0.5 ) , FadeIn(UpperCAmelCase_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(UpperCAmelCase_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
lowerCAmelCase : List[str] = 0.7
self.play(
Circumscribe(model_arr[i] , **UpperCAmelCase_ ) , Circumscribe(cpu_left_col_base[i] , **UpperCAmelCase_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(model_arr[i + 1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(cpu_left_col_base[-1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
lowerCAmelCase : int = a_c
lowerCAmelCase : Union[str, Any] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(UpperCAmelCase_ ) , FadeOut(UpperCAmelCase_ , run_time=0.5 ) , )
lowerCAmelCase : int = MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase_ , run_time=3 ) , MoveToTarget(UpperCAmelCase_ ) )
self.wait()
| 323
| 0
|
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
__A : Optional[Any] = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
__A : Optional[Any] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
__A : int = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Evaluate how similar the item is with the target by counting each char in the right position."""
    score = len([g for position, g in enumerate(item ) if g == main_target[position]] )
    return (item, float(score ))
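# Worked example (added): nine of the eleven characters already match.
assert evaluate("Helxo Worlx", "Hello World") == ("Helxo Worlx", 9.0)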
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1 ) - 1 )
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child )
    if random.uniform(0, 1 ) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child ) ) - 1] = random.choice(genes )
    return "".join(child_list )
def select(
    parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str], ) -> list[str]:
    """Select a second parent and generate new children for the population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100 ) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n ):
        parent_2 = population_score[random.randint(0, N_SELECTED )][0]
        child_1, child_2 = crossover(parent_1[0], parent_2 )
        # Append new string to the population list.
        pop.append(mutate(child_1, genes ) )
        pop.append(mutate(child_2, genes ) )
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until a generation contains a perfect match for `target`."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg )
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes} )
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg )
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION ):
        population.append(''.join([random.choice(genes ) for i in range(len(target ) )] ) )
    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population )
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target ) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True )
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}" )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3 )]
        population.clear()
        population.extend(population_best )
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target )) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED ):
            population.extend(select(population_score[int(i )], population_score, genes ) )
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population ) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        '''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
    )
    genes_list = list(
        ''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
        '''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        F'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
    )
| 363
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_informer'''] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
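    # Note (added): the assignment above swaps the real module object for a
    # _LazyModule proxy, so the heavy torch-backed symbols are only imported on
    # first attribute access, e.g.:
    #
    #     from transformers.models.informer import InformerConfig  # cheap, config only
    #     from transformers.models.informer import InformerModel   # first touch pulls in torch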
| 323
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_longt5'''] = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_longt5'''] = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 364
|
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='''relu''')
    )
    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation='''relu'''))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        '''dataset/training_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
    )
    test_set = test_datagen.flow_from_directory(
        '''dataset/test_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
    # `fit` accepts generators directly; `fit_generator` was removed from recent TF releases
    classifier.fit(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
__A : Any = tf.keras.preprocessing.image.load_img(
'''dataset/single_prediction/image.png''', target_size=(64, 64)
)
    __A : List[str] = tf.keras.preprocessing.image.img_to_array(test_image)
    __A : Optional[Any] = np.expand_dims(test_image, axis=0) / 255.0  # match the training-time rescale
    __A : int = classifier.predict(test_image)
    # training_set.class_indices
    # The sigmoid output is a probability, so threshold it rather than
    # testing for an exact 0 or 1.
    if result[0][0] < 0.5:
        __A : Optional[int] = '''Normal'''
    else:
        __A : str = '''Abnormality detected'''
| 323
| 0
|
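# A small helper, sketched here as an aside, showing a safer way to run the single-image
# prediction from the CNN snippet above: the input is rescaled exactly as during training,
# and the sigmoid probability is thresholded instead of compared to an exact 0 or 1.
# The 0.5 threshold and the (64, 64) target size are assumptions carried over from above.
import numpy as np
import tensorflow as tf
def predict_single_image(model, image_path, threshold=0.5):
    image = tf.keras.preprocessing.image.load_img(image_path, target_size=(64, 64))
    array = tf.keras.preprocessing.image.img_to_array(image) / 255.0  # match training rescale
    batch = np.expand_dims(array, axis=0)  # the model expects a batch dimension
    probability = float(model.predict(batch)[0][0])
    return ("Abnormality detected" if probability >= threshold else "Normal"), probability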
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : Tuple = (CMStochasticIterativeScheduler,)
lowerCAmelCase_ : List[Any] = 10
def lowercase__ ( self : Optional[Any] , **UpperCAmelCase_ : Optional[Any] ):
lowerCAmelCase : Dict = {
'num_train_timesteps': 201,
'sigma_min': 0.0_02,
'sigma_max': 80.0,
}
config.update(**UpperCAmelCase_ )
return config
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Any = 10
lowerCAmelCase : List[Any] = self.get_scheduler_config()
lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0](**UpperCAmelCase_ )
scheduler.set_timesteps(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = scheduler.timesteps[0]
lowerCAmelCase : List[str] = scheduler.timesteps[1]
lowerCAmelCase : Union[str, Any] = self.dummy_sample
lowerCAmelCase : List[Any] = 0.1 * sample
lowerCAmelCase : str = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ).prev_sample
lowerCAmelCase : Any = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowercase__ ( self : Any ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase_ )
def lowercase__ ( self : str ):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=UpperCAmelCase_ )
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : List[str] = self.scheduler_classes[0]
lowerCAmelCase : Union[str, Any] = self.get_scheduler_config()
lowerCAmelCase : List[Any] = scheduler_class(**UpperCAmelCase_ )
lowerCAmelCase : Tuple = 1
scheduler.set_timesteps(UpperCAmelCase_ )
lowerCAmelCase : Dict = scheduler.timesteps
lowerCAmelCase : Any = torch.manual_seed(0 )
lowerCAmelCase : Dict = self.dummy_model()
lowerCAmelCase : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(UpperCAmelCase_ ):
# 1. scale model input
lowerCAmelCase : str = scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_ )
# 2. predict noise residual
lowerCAmelCase : List[Any] = model(UpperCAmelCase_ , UpperCAmelCase_ )
# 3. predict previous sample x_t-1
lowerCAmelCase : List[Any] = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_ ).prev_sample
lowerCAmelCase : List[Any] = pred_prev_sample
lowerCAmelCase : Dict = torch.sum(torch.abs(UpperCAmelCase_ ) )
lowerCAmelCase : Any = torch.mean(torch.abs(UpperCAmelCase_ ) )
assert abs(result_sum.item() - 192.7614 ) < 1E-2
assert abs(result_mean.item() - 0.25_10 ) < 1E-3
def lowercase__ ( self : Any ):
lowerCAmelCase : Tuple = self.scheduler_classes[0]
lowerCAmelCase : int = self.get_scheduler_config()
lowerCAmelCase : Any = scheduler_class(**UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = [106, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase_ )
lowerCAmelCase : Any = scheduler.timesteps
lowerCAmelCase : int = torch.manual_seed(0 )
lowerCAmelCase : List[str] = self.dummy_model()
lowerCAmelCase : Tuple = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
lowerCAmelCase : Optional[Any] = scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_ )
# 2. predict noise residual
lowerCAmelCase : Dict = model(UpperCAmelCase_ , UpperCAmelCase_ )
# 3. predict previous sample x_t-1
lowerCAmelCase : str = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_ ).prev_sample
lowerCAmelCase : List[str] = pred_prev_sample
lowerCAmelCase : List[Any] = torch.sum(torch.abs(UpperCAmelCase_ ) )
lowerCAmelCase : List[str] = torch.mean(torch.abs(UpperCAmelCase_ ) )
assert abs(result_sum.item() - 347.6357 ) < 1E-2
assert abs(result_mean.item() - 0.45_27 ) < 1E-3
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : List[str] = self.scheduler_classes[0]
lowerCAmelCase : Dict = self.get_scheduler_config()
lowerCAmelCase : int = scheduler_class(**UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = [39, 30, 12, 15, 0]
with self.assertRaises(UpperCAmelCase_ , msg='`timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=UpperCAmelCase_ )
def lowercase__ ( self : int ):
lowerCAmelCase : str = self.scheduler_classes[0]
lowerCAmelCase : int = self.get_scheduler_config()
lowerCAmelCase : Union[str, Any] = scheduler_class(**UpperCAmelCase_ )
lowerCAmelCase : Dict = [39, 30, 12, 1, 0]
lowerCAmelCase : str = len(UpperCAmelCase_ )
with self.assertRaises(UpperCAmelCase_ , msg='Can only pass one of `num_inference_steps` or `timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase_ , timesteps=UpperCAmelCase_ )
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
lowerCAmelCase : Optional[Any] = self.get_scheduler_config()
lowerCAmelCase : List[Any] = scheduler_class(**UpperCAmelCase_ )
lowerCAmelCase : Dict = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            UpperCAmelCase_ , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=UpperCAmelCase_ )
| 365
|
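# For context, the scheduler tests above all exercise the same three-step denoising loop,
# sketched standalone below. Any diffusers scheduler exposing `timesteps`,
# `scale_model_input`, and `step` (as `CMStochasticIterativeScheduler` does) fits this shape.
def denoise(model, scheduler, sample, generator=None):
    for t in scheduler.timesteps:
        scaled = scheduler.scale_model_input(sample, t)  # 1. scale model input
        residual = model(scaled, t)  # 2. predict noise residual
        sample = scheduler.step(residual, t, sample, generator=generator).prev_sample  # 3. step
    return sample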
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__A : str = logging.getLogger(__name__)
class __A ( lowerCAmelCase ):
def lowercase__ ( self : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Any=None ):
lowerCAmelCase : List[Any] = self.layer[current_layer](UpperCAmelCase_ , UpperCAmelCase_ , head_mask[current_layer] )
lowerCAmelCase : Optional[Any] = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , lowerCAmelCase , )
class __A ( lowerCAmelCase ):
def __init__( self : Dict , UpperCAmelCase_ : Optional[int] ):
super().__init__(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = BertEncoderWithPabee(UpperCAmelCase_ )
self.init_weights()
lowerCAmelCase : str = 0
lowerCAmelCase : Optional[Any] = 0
lowerCAmelCase : str = 0
lowerCAmelCase : Dict = 0
def lowercase__ ( self : int , UpperCAmelCase_ : Any ):
lowerCAmelCase : int = threshold
def lowercase__ ( self : Tuple , UpperCAmelCase_ : Dict ):
lowerCAmelCase : Optional[Any] = patience
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Tuple = 0
lowerCAmelCase : Tuple = 0
def lowercase__ ( self : Dict ):
lowerCAmelCase : Optional[int] = self.inference_layers_num / self.inference_instances_num
lowerCAmelCase : List[Any] = (
f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
)
print(UpperCAmelCase_ )
@add_start_docstrings_to_model_forward(UpperCAmelCase_ )
def lowercase__ ( self : Tuple , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Tuple=False , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
lowerCAmelCase : Optional[int] = input_ids.size()
elif inputs_embeds is not None:
lowerCAmelCase : List[str] = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
lowerCAmelCase : Union[str, Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
lowerCAmelCase : Any = torch.ones(UpperCAmelCase_ , device=UpperCAmelCase_ )
if token_type_ids is None:
lowerCAmelCase : Union[str, Any] = torch.zeros(UpperCAmelCase_ , dtype=torch.long , device=UpperCAmelCase_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
lowerCAmelCase : torch.Tensor = self.get_extended_attention_mask(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[int] = encoder_hidden_states.size()
lowerCAmelCase : Optional[int] = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
lowerCAmelCase : Any = torch.ones(UpperCAmelCase_ , device=UpperCAmelCase_ )
lowerCAmelCase : Tuple = self.invert_attention_mask(UpperCAmelCase_ )
else:
lowerCAmelCase : List[Any] = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
lowerCAmelCase : Optional[Any] = self.get_head_mask(UpperCAmelCase_ , self.config.num_hidden_layers )
lowerCAmelCase : int = self.embeddings(
input_ids=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , inputs_embeds=UpperCAmelCase_ )
lowerCAmelCase : List[str] = embedding_output
if self.training:
lowerCAmelCase : Tuple = []
for i in range(self.config.num_hidden_layers ):
lowerCAmelCase : Dict = self.encoder.adaptive_forward(
UpperCAmelCase_ , current_layer=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ )
lowerCAmelCase : List[str] = self.pooler(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = output_layers[i](output_dropout(UpperCAmelCase_ ) )
res.append(UpperCAmelCase_ )
elif self.patience == 0: # Use all layers for inference
lowerCAmelCase : Union[str, Any] = self.encoder(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , encoder_attention_mask=UpperCAmelCase_ , )
lowerCAmelCase : Optional[Any] = self.pooler(encoder_outputs[0] )
lowerCAmelCase : List[Any] = [output_layers[self.config.num_hidden_layers - 1](UpperCAmelCase_ )]
else:
lowerCAmelCase : Tuple = 0
lowerCAmelCase : List[str] = None
lowerCAmelCase : Optional[Any] = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
lowerCAmelCase : Union[str, Any] = self.encoder.adaptive_forward(
UpperCAmelCase_ , current_layer=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = self.pooler(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = output_layers[i](UpperCAmelCase_ )
if regression:
lowerCAmelCase : List[str] = logits.detach()
if patient_result is not None:
lowerCAmelCase : List[Any] = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
lowerCAmelCase : Any = 0
else:
lowerCAmelCase : Union[str, Any] = logits.detach().argmax(dim=1 )
if patient_result is not None:
lowerCAmelCase : Optional[Any] = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(UpperCAmelCase_ ) ):
patient_counter += 1
else:
lowerCAmelCase : Tuple = 0
lowerCAmelCase : List[Any] = logits
if patient_counter == self.patience:
break
lowerCAmelCase : Dict = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , lowerCAmelCase , )
class __A ( lowerCAmelCase ):
def __init__( self : Tuple , UpperCAmelCase_ : Tuple ):
super().__init__(UpperCAmelCase_ )
lowerCAmelCase : Tuple = config.num_labels
lowerCAmelCase : int = BertModelWithPabee(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = nn.Dropout(config.hidden_dropout_prob )
lowerCAmelCase : List[Any] = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Any=None , ):
lowerCAmelCase : int = self.bert(
input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , head_mask=UpperCAmelCase_ , inputs_embeds=UpperCAmelCase_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
lowerCAmelCase : Any = (logits[-1],)
if labels is not None:
lowerCAmelCase : Tuple = None
lowerCAmelCase : Optional[int] = 0
for ix, logits_item in enumerate(UpperCAmelCase_ ):
if self.num_labels == 1:
# We are doing regression
lowerCAmelCase : Tuple = MSELoss()
lowerCAmelCase : Any = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
lowerCAmelCase : Tuple = CrossEntropyLoss()
lowerCAmelCase : Dict = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
lowerCAmelCase : Any = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
lowerCAmelCase : str = (total_loss / total_weights,) + outputs
return outputs
| 323
| 0
|
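# The core of the PABEE inference path above is the patience counter; below is a
# framework-free sketch of that rule, with illustrative names, for a non-empty sequence
# of per-layer class predictions (e.g. the argmax of each internal classifier's logits):
def patience_early_exit(layer_predictions, patience):
    previous, counter, layers_used = None, 0, 0
    for prediction in layer_predictions:
        layers_used += 1
        counter = counter + 1 if previous is not None and prediction == previous else 0
        previous = prediction
        if counter == patience:
            break  # enough consecutive layers agree; stop early
    return previous, layers_used
# Layers 3-6 agree, so with patience=2 inference stops after layer 5 of 6.
assert patience_early_exit([1, 0, 2, 2, 2, 2], patience=2) == (2, 5)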
"""simple docstring"""
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__A : Dict = 16
__A : Optional[int] = 32
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase = 16 ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('bert-base-cased' )
lowerCAmelCase : Union[str, Any] = DatasetDict(
{
'train': dataset['train'].select(_UpperCAmelCase ),
'validation': dataset['train'].select(_UpperCAmelCase ),
'test': dataset['validation'],
} )
def tokenize_function(_UpperCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase : Union[str, Any] = tokenizer(examples['sentence1'], examples['sentence2'], truncation=_UpperCAmelCase, max_length=_UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCAmelCase : Optional[Any] = datasets.map(
_UpperCAmelCase, batched=_UpperCAmelCase, remove_columns=['idx', 'sentence1', 'sentence2'], )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase : Any = tokenized_datasets.rename_column('label', 'labels' )
def collate_fn(_UpperCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCAmelCase : str = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCAmelCase : Optional[Any] = 16
elif accelerator.mixed_precision != "no":
lowerCAmelCase : Union[str, Any] = 8
else:
lowerCAmelCase : int = None
return tokenizer.pad(
_UpperCAmelCase, padding='longest', max_length=_UpperCAmelCase, pad_to_multiple_of=_UpperCAmelCase, return_tensors='pt', )
# Instantiate dataloaders.
lowerCAmelCase : Optional[Any] = DataLoader(
tokenized_datasets['train'], shuffle=_UpperCAmelCase, collate_fn=_UpperCAmelCase, batch_size=_UpperCAmelCase )
lowerCAmelCase : List[Any] = DataLoader(
tokenized_datasets['validation'], shuffle=_UpperCAmelCase, collate_fn=_UpperCAmelCase, batch_size=_UpperCAmelCase )
lowerCAmelCase : Union[str, Any] = DataLoader(
tokenized_datasets['test'], shuffle=_UpperCAmelCase, collate_fn=_UpperCAmelCase, batch_size=_UpperCAmelCase )
return train_dataloader, eval_dataloader, test_dataloader
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> int:
'''simple docstring'''
lowerCAmelCase : int = []
# Download the dataset
lowerCAmelCase : Optional[Any] = load_dataset('glue', 'mrpc' )
# Create our splits
lowerCAmelCase : List[Any] = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
lowerCAmelCase : List[Any] = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase : Any = config['lr']
lowerCAmelCase : str = int(config['num_epochs'] )
lowerCAmelCase : Tuple = int(config['seed'] )
lowerCAmelCase : Optional[Any] = int(config['batch_size'] )
lowerCAmelCase : Optional[Any] = evaluate.load('glue', 'mrpc' )
# If the batch size is too big we use gradient accumulation
lowerCAmelCase : List[Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowerCAmelCase : Union[str, Any] = batch_size // MAX_GPU_BATCH_SIZE
lowerCAmelCase : List[str] = MAX_GPU_BATCH_SIZE
set_seed(_UpperCAmelCase )
# New Code #
# Create our folds:
lowerCAmelCase : Optional[int] = kfold.split(np.zeros(datasets['train'].num_rows ), datasets['train']['label'] )
lowerCAmelCase : int = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(_UpperCAmelCase ):
lowerCAmelCase : List[Any] = get_fold_dataloaders(
_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase : List[Any] = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=_UpperCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase : Optional[int] = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase : str = AdamW(params=model.parameters(), lr=_UpperCAmelCase )
# Instantiate scheduler
lowerCAmelCase : Optional[Any] = get_linear_schedule_with_warmup(
optimizer=_UpperCAmelCase, num_warmup_steps=100, num_training_steps=(len(_UpperCAmelCase ) * num_epochs) // gradient_accumulation_steps, )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase : Tuple = accelerator.prepare(
_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
# Now we train the model
for epoch in range(_UpperCAmelCase ):
model.train()
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCAmelCase : Union[str, Any] = model(**_UpperCAmelCase )
lowerCAmelCase : Any = outputs.loss
lowerCAmelCase : Tuple = loss / gradient_accumulation_steps
accelerator.backward(_UpperCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase : Optional[int] = model(**_UpperCAmelCase )
lowerCAmelCase : List[Any] = outputs.logits.argmax(dim=-1 )
lowerCAmelCase : Tuple = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=_UpperCAmelCase, references=_UpperCAmelCase, )
lowerCAmelCase : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:", _UpperCAmelCase )
# New Code #
# We also run predictions on the test set at the very end
lowerCAmelCase : Optional[int] = []
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase : List[str] = model(**_UpperCAmelCase )
lowerCAmelCase : List[Any] = outputs.logits
lowerCAmelCase : str = accelerator.gather_for_metrics((predictions, batch['labels']) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(_UpperCAmelCase, dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
lowerCAmelCase : List[str] = torch.cat(_UpperCAmelCase, dim=0 )
lowerCAmelCase : List[Any] = torch.stack(_UpperCAmelCase, dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
lowerCAmelCase : List[str] = metric.compute(predictions=_UpperCAmelCase, references=_UpperCAmelCase )
accelerator.print('Average test metrics from all folds:', _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase : Any = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
        '--mixed_precision', type=_UpperCAmelCase, default=_UpperCAmelCase, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose '
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '
        'and an Nvidia Ampere GPU.', )
parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.' )
# New Code #
parser.add_argument('--num_folds', type=_UpperCAmelCase, default=3, help='The number of splits to perform across the dataset' )
lowerCAmelCase : List[Any] = parser.parse_args()
lowerCAmelCase : Any = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(_UpperCAmelCase, _UpperCAmelCase )
if __name__ == "__main__":
main()
| 366
|
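# The fold construction above reduces to a single sklearn call; a self-contained toy run,
# with arbitrary labels, showing that each split preserves the class balance:
import numpy as np
from sklearn.model_selection import StratifiedKFold
labels = np.array([0, 0, 0, 1, 1, 1, 0, 1, 0, 1])
kfold = StratifiedKFold(n_splits=5)
for fold, (train_idxs, valid_idxs) in enumerate(kfold.split(np.zeros(len(labels)), labels)):
    print(f"fold {fold}: train={train_idxs.tolist()} valid={valid_idxs.tolist()}")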
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__A : List[Any] = logging.get_logger(__name__)
__A : List[Any] = {
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : Union[str, Any] = "deberta-v2"
def __init__( self : int , UpperCAmelCase_ : Dict=128100 , UpperCAmelCase_ : Optional[int]=1536 , UpperCAmelCase_ : Tuple=24 , UpperCAmelCase_ : Any=24 , UpperCAmelCase_ : Any=6144 , UpperCAmelCase_ : Tuple="gelu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : List[Any]=512 , UpperCAmelCase_ : List[str]=0 , UpperCAmelCase_ : List[Any]=0.02 , UpperCAmelCase_ : Optional[int]=1E-7 , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Dict=-1 , UpperCAmelCase_ : Tuple=0 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : int=None , UpperCAmelCase_ : List[Any]=0 , UpperCAmelCase_ : int="gelu" , **UpperCAmelCase_ : int , ):
super().__init__(**UpperCAmelCase_ )
lowerCAmelCase : Dict = hidden_size
lowerCAmelCase : List[Any] = num_hidden_layers
lowerCAmelCase : str = num_attention_heads
lowerCAmelCase : List[str] = intermediate_size
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : int = hidden_dropout_prob
lowerCAmelCase : int = attention_probs_dropout_prob
lowerCAmelCase : Any = max_position_embeddings
lowerCAmelCase : str = type_vocab_size
lowerCAmelCase : Union[str, Any] = initializer_range
lowerCAmelCase : Union[str, Any] = relative_attention
lowerCAmelCase : List[Any] = max_relative_positions
lowerCAmelCase : List[Any] = pad_token_id
lowerCAmelCase : Optional[Any] = position_biased_input
# Backwards compatibility
if type(UpperCAmelCase_ ) == str:
lowerCAmelCase : Tuple = [x.strip() for x in pos_att_type.lower().split('|' )]
lowerCAmelCase : str = pos_att_type
lowerCAmelCase : Dict = vocab_size
lowerCAmelCase : Optional[Any] = layer_norm_eps
lowerCAmelCase : str = kwargs.get('pooler_hidden_size' , UpperCAmelCase_ )
lowerCAmelCase : Tuple = pooler_dropout
lowerCAmelCase : Union[str, Any] = pooler_hidden_act
class __A ( lowerCAmelCase ):
@property
def lowercase__ ( self : Optional[Any] ):
if self.task == "multiple-choice":
lowerCAmelCase : Tuple = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase : Union[str, Any] = {0: 'batch', 1: 'sequence'}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] )
else:
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] )
@property
def lowercase__ ( self : int ):
return 12
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional["TensorType"] = None , UpperCAmelCase_ : int = 3 , UpperCAmelCase_ : int = 40 , UpperCAmelCase_ : int = 40 , UpperCAmelCase_ : "PreTrainedTokenizerBase" = None , ):
lowerCAmelCase : List[str] = super().generate_dummy_inputs(preprocessor=UpperCAmelCase_ , framework=UpperCAmelCase_ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 323
| 0
|
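# A standalone restatement of the ONNX `inputs` property above, handy for seeing which
# inputs get exported; the function name is illustrative, but the logic mirrors the
# original: multiple-choice adds a `choice` axis, and `token_type_ids` is only exported
# when the config defines token types (`type_vocab_size > 0`).
from collections import OrderedDict
def onnx_inputs(task, type_vocab_size):
    if task == "multiple-choice":
        dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        dynamic_axis = {0: "batch", 1: "sequence"}
    names = ["input_ids", "attention_mask"] + (["token_type_ids"] if type_vocab_size > 0 else [])
    return OrderedDict((name, dynamic_axis) for name in names)
print(onnx_inputs("default", type_vocab_size=0))  # no token_type_ids entry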
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__A : Union[str, Any] = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[int] = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
__A : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 367
|
__A : Dict = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
__A : List[Any] = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
__A : Dict = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
__A : Optional[int] = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
__A : Optional[int] = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
__A : Tuple = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
__A : int = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
__A : Optional[Any] = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 323
| 0
|
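# The descending integer lists above are hand-built inference schedules meant for a
# scheduler's `set_timesteps(timesteps=...)` path. A quick validity check, with the
# monotonicity rule taken from the scheduler tests earlier in this file:
def validate_custom_timesteps(timesteps, num_train_timesteps=1000):
    if any(later >= earlier for earlier, later in zip(timesteps, timesteps[1:])):
        raise ValueError("`timesteps` must be in descending order.")
    if timesteps[0] >= num_train_timesteps:
        raise ValueError("`timesteps` must start before `num_train_timesteps`.")
validate_custom_timesteps([999, 800, 799, 600, 599, 500, 0])  # passes silently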
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : Union[str, Any] = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
__A : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 368
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__A : Optional[Any] = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
__A : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 323
| 0
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__A : List[Any] = '''pt'''
elif is_tf_available():
__A : List[str] = '''tf'''
else:
__A : Optional[Any] = '''jax'''
class __A ( lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : str = PerceiverTokenizer
lowerCAmelCase_ : int = False
def lowercase__ ( self : List[str] ):
super().setUp()
lowerCAmelCase : List[str] = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase__ ( self : Union[str, Any] ):
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def lowercase__ ( self : Optional[Any] , **UpperCAmelCase_ : List[str] ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowercase__ ( self : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : str=20 , UpperCAmelCase_ : List[str]=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
lowerCAmelCase : Dict = []
for i in range(len(UpperCAmelCase_ ) ):
try:
lowerCAmelCase : Tuple = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCAmelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCAmelCase : Optional[Any] = list(filter(lambda UpperCAmelCase_ : re.match(R'^[ a-zA-Z]+$' , t[1] ) , UpperCAmelCase_ ) )
lowerCAmelCase : Union[str, Any] = list(filter(lambda UpperCAmelCase_ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCAmelCase_ ) , UpperCAmelCase_ ) )
if max_length is not None and len(UpperCAmelCase_ ) > max_length:
lowerCAmelCase : Optional[int] = toks[:max_length]
if min_length is not None and len(UpperCAmelCase_ ) < min_length and len(UpperCAmelCase_ ) > 0:
while len(UpperCAmelCase_ ) < min_length:
lowerCAmelCase : List[Any] = toks + toks
# toks_str = [t[1] for t in toks]
lowerCAmelCase : Union[str, Any] = [t[0] for t in toks]
# Ensure consistency
lowerCAmelCase : Optional[Any] = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ )
if " " not in output_txt and len(UpperCAmelCase_ ) > 1:
lowerCAmelCase : List[Any] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCAmelCase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCAmelCase_ )
)
if with_prefix_space:
lowerCAmelCase : List[str] = ' ' + output_txt
lowerCAmelCase : Tuple = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
return output_txt, output_ids
def lowercase__ ( self : Tuple ):
lowerCAmelCase : List[Any] = self.perceiver_tokenizer
lowerCAmelCase : Optional[int] = 'Unicode €.'
lowerCAmelCase : str = tokenizer(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['input_ids'] , UpperCAmelCase_ )
# decoding
lowerCAmelCase : List[Any] = tokenizer.decode(UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , '[CLS]Unicode €.[SEP]' )
lowerCAmelCase : Union[str, Any] = tokenizer('e è é ê ë' )
lowerCAmelCase : int = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['input_ids'] , UpperCAmelCase_ )
# decoding
lowerCAmelCase : List[Any] = tokenizer.decode(UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Union[str, Any] = self.perceiver_tokenizer
lowerCAmelCase : List[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
lowerCAmelCase : Optional[int] = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
lowerCAmelCase : Optional[Any] = tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
if FRAMEWORK != "jax":
lowerCAmelCase : Dict = list(batch.input_ids.numpy()[0] )
else:
lowerCAmelCase : List[str] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : Tuple = self.perceiver_tokenizer
lowerCAmelCase : str = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
lowerCAmelCase : Union[str, Any] = tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , UpperCAmelCase_ )
self.assertIn('attention_mask' , UpperCAmelCase_ )
self.assertNotIn('decoder_input_ids' , UpperCAmelCase_ )
self.assertNotIn('decoder_attention_mask' , UpperCAmelCase_ )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : Optional[int] = self.perceiver_tokenizer
lowerCAmelCase : Optional[Any] = [
'Summary of the text.',
'Another summary.',
]
lowerCAmelCase : List[str] = tokenizer(
text_target=UpperCAmelCase_ , max_length=32 , padding='max_length' , truncation=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def lowercase__ ( self : Union[str, Any] ):
# safety check on max_len default value so we are sure the test works
lowerCAmelCase : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowerCAmelCase : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase : List[str] = tempfile.mkdtemp()
lowerCAmelCase : Optional[Any] = ' He is very happy, UNwant\u00E9d,running'
lowerCAmelCase : Optional[int] = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
tokenizer.save_pretrained(UpperCAmelCase_ )
lowerCAmelCase : str = tokenizer.__class__.from_pretrained(UpperCAmelCase_ )
lowerCAmelCase : int = after_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
shutil.rmtree(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase : List[str] = tempfile.mkdtemp()
lowerCAmelCase : Optional[int] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
lowerCAmelCase : List[Any] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
lowerCAmelCase : Tuple = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
tokenizer.save_pretrained(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = tokenizer.__class__.from_pretrained(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = after_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowerCAmelCase : Tuple = tokenizer.__class__.from_pretrained(UpperCAmelCase_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCAmelCase_ )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Optional[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCAmelCase_ )
with open(os.path.join(UpperCAmelCase_ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
lowerCAmelCase : List[Any] = json.load(UpperCAmelCase_ )
with open(os.path.join(UpperCAmelCase_ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
lowerCAmelCase : List[Any] = json.load(UpperCAmelCase_ )
lowerCAmelCase : str = [f"<extra_id_{i}>" for i in range(125 )]
lowerCAmelCase : Tuple = added_tokens_extra_ids + [
'an_additional_special_token'
]
lowerCAmelCase : Union[str, Any] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(UpperCAmelCase_ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
with open(os.path.join(UpperCAmelCase_ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCAmelCase : str = tokenizer_class.from_pretrained(
UpperCAmelCase_ , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCAmelCase : Dict = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=UpperCAmelCase_ )]
lowerCAmelCase : List[str] = tokenizer_class.from_pretrained(
UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : Tuple = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '�' )
def lowercase__ ( self : Dict ):
pass
def lowercase__ ( self : List[Any] ):
pass
def lowercase__ ( self : Union[str, Any] ):
pass
def lowercase__ ( self : List[Any] ):
pass
def lowercase__ ( self : List[Any] ):
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
lowerCAmelCase : Optional[int] = self.get_tokenizers(fast=UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCAmelCase : int = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
lowerCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_string(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
| 369
|
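# The Unicode test above boils down to a byte-level round trip; a short demonstration,
# assuming network access to the `deepmind/language-perceiver` checkpoint:
from transformers import PerceiverTokenizer
tokenizer = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
ids = tokenizer("Unicode €.")["input_ids"]
# The euro sign is three UTF-8 bytes, so it contributes three ids on top of the
# [CLS]/[SEP] specials and the tokenizer's special-token offset.
print(ids)
print(tokenizer.decode(ids))  # '[CLS]Unicode €.[SEP]'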
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class __A :
def __init__( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any]=13 , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : str=99 , UpperCAmelCase_ : List[str]=32 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Optional[Any]=37 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : Tuple=512 , UpperCAmelCase_ : Any=16 , UpperCAmelCase_ : Any=2 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : Optional[int]=3 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Optional[int]=None , ):
lowerCAmelCase : int = parent
lowerCAmelCase : Any = 13
lowerCAmelCase : Union[str, Any] = 7
lowerCAmelCase : List[Any] = True
lowerCAmelCase : List[str] = True
lowerCAmelCase : Tuple = True
lowerCAmelCase : Union[str, Any] = True
lowerCAmelCase : Tuple = 99
lowerCAmelCase : Optional[Any] = 32
lowerCAmelCase : List[str] = 2
lowerCAmelCase : str = 4
lowerCAmelCase : Optional[Any] = 37
lowerCAmelCase : List[Any] = 'gelu'
lowerCAmelCase : Any = 0.1
lowerCAmelCase : Any = 0.1
lowerCAmelCase : Optional[Any] = 512
lowerCAmelCase : Dict = 16
lowerCAmelCase : Optional[Any] = 2
lowerCAmelCase : Union[str, Any] = 0.02
lowerCAmelCase : Optional[int] = 3
lowerCAmelCase : List[str] = 4
lowerCAmelCase : Any = None
def lowercase__ ( self : List[str] ):
lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Any = None
if self.use_input_mask:
lowerCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : Dict = None
if self.use_token_type_ids:
lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase : List[str] = None
lowerCAmelCase : Any = None
lowerCAmelCase : Tuple = None
if self.use_labels:
lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase : int = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase : Tuple = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any ):
lowerCAmelCase : List[Any] = TFRoFormerModel(config=UpperCAmelCase_ )
lowerCAmelCase : str = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCAmelCase : str = [input_ids, input_mask]
lowerCAmelCase : Any = model(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict ):
lowerCAmelCase : str = True
lowerCAmelCase : List[str] = TFRoFormerForCausalLM(config=UpperCAmelCase_ )
lowerCAmelCase : List[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : List[str] = model(UpperCAmelCase_ )['logits']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def lowercase__ ( self : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any ):
lowerCAmelCase : Union[str, Any] = TFRoFormerForMaskedLM(config=UpperCAmelCase_ )
lowerCAmelCase : Tuple = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : Tuple = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] ):
lowerCAmelCase : str = self.num_labels
lowerCAmelCase : Optional[Any] = TFRoFormerForSequenceClassification(config=UpperCAmelCase_ )
lowerCAmelCase : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : Optional[int] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] ):
lowerCAmelCase : Dict = self.num_choices
lowerCAmelCase : str = TFRoFormerForMultipleChoice(config=UpperCAmelCase_ )
lowerCAmelCase : Tuple = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : Tuple = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : int = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : Union[str, Any] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple ):
lowerCAmelCase : List[Any] = self.num_labels
lowerCAmelCase : Any = TFRoFormerForTokenClassification(config=UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : Dict = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int ):
lowerCAmelCase : Optional[int] = TFRoFormerForQuestionAnswering(config=UpperCAmelCase_ )
lowerCAmelCase : Dict = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : int = model(UpperCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Union[str, Any] = config_and_inputs
lowerCAmelCase : Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : List[str] = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ : Optional[Any] = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase_ : Optional[int] = False
lowerCAmelCase_ : int = False
def lowercase__ ( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def lowercase__ ( self : int ):
lowerCAmelCase : List[Any] = TFRoFormerModelTester(self )
lowerCAmelCase : Tuple = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def lowercase__ ( self : int ):
self.config_tester.run_common_tests()
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase_ )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ )
def lowercase__ ( self : str ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase_ )
def lowercase__ ( self : int ):
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
@slow
def lowercase__ ( self : Dict ):
lowerCAmelCase : str = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
self.assertIsNotNone(UpperCAmelCase_ )
@require_tf
class __A ( unittest.TestCase ):
@slow
def lowercase__ ( self : Any ):
lowerCAmelCase : Tuple = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
lowerCAmelCase : Dict = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ )[0]
# TODO Replace vocab size
lowerCAmelCase : Any = 50000
lowerCAmelCase : str = [1, 6, vocab_size]
self.assertEqual(output.shape , UpperCAmelCase_ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
lowerCAmelCase : Union[str, Any] = tf.constant(
[
[
[-0.12_05_33_41, -1.0_26_49_01, 0.29_22_19_46],
[-1.5_13_37_83, 0.19_74_33, 0.15_19_06_07],
[-5.0_13_54_03, -3.90_02_56, -0.84_03_87_64],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 )
@require_tf
class __A ( unittest.TestCase ):
lowerCAmelCase_ : Optional[int] = 1E-4
def lowercase__ ( self : Any ):
lowerCAmelCase : Optional[int] = tf.constant([[4, 10]] )
lowerCAmelCase : Tuple = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
lowerCAmelCase : int = emba(input_ids.shape )
lowerCAmelCase : str = tf.constant(
[[0.00_00, 0.00_00, 0.00_00, 1.00_00, 1.00_00, 1.00_00], [0.84_15, 0.04_64, 0.00_22, 0.54_03, 0.99_89, 1.00_00]] )
tf.debugging.assert_near(UpperCAmelCase_ , UpperCAmelCase_ , atol=self.tolerance )
def lowercase__ ( self : int ):
lowerCAmelCase : Dict = tf.constant(
[
[0.00_00, 0.00_00, 0.00_00, 0.00_00, 0.00_00],
[0.84_15, 0.82_19, 0.80_20, 0.78_19, 0.76_17],
[0.90_93, 0.93_64, 0.95_81, 0.97_49, 0.98_70],
] )
lowerCAmelCase : List[Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
lowerCAmelCase : List[Any] = emba.weight[:3, :5]
tf.debugging.assert_near(UpperCAmelCase_ , UpperCAmelCase_ , atol=self.tolerance )
@require_tf
class __A ( unittest.TestCase ):
lowerCAmelCase_ : Optional[int] = 1E-4
def lowercase__ ( self : List[Any] ):
# 2,12,16,64
lowerCAmelCase : Optional[int] = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowerCAmelCase : List[str] = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowerCAmelCase : Optional[int] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
lowerCAmelCase : List[Any] = embed_positions([2, 16, 768] )[None, None, :, :]
        query_layer , key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = tf.constant(
[
[0.00_00, 0.01_00, 0.02_00, 0.03_00, 0.04_00, 0.05_00, 0.06_00, 0.07_00],
[-0.20_12, 0.88_97, 0.02_63, 0.94_01, 0.20_74, 0.94_63, 0.34_81, 0.93_43],
[-1.70_57, 0.62_71, -1.21_45, 1.38_97, -0.63_03, 1.76_47, -0.11_73, 1.89_85],
[-2.17_31, -1.63_97, -2.73_58, 0.28_54, -2.18_40, 1.71_83, -1.30_18, 2.48_71],
[0.27_17, -3.61_73, -2.92_06, -2.19_88, -3.66_38, 0.38_58, -2.91_55, 2.29_80],
[3.98_59, -2.15_80, -0.79_84, -4.49_04, -4.11_81, -2.02_52, -4.47_82, 1.12_53],
] )
lowerCAmelCase : Union[str, Any] = tf.constant(
[
[0.00_00, -0.01_00, -0.02_00, -0.03_00, -0.04_00, -0.05_00, -0.06_00, -0.07_00],
[0.20_12, -0.88_97, -0.02_63, -0.94_01, -0.20_74, -0.94_63, -0.34_81, -0.93_43],
[1.70_57, -0.62_71, 1.21_45, -1.38_97, 0.63_03, -1.76_47, 0.11_73, -1.89_85],
[2.17_31, 1.63_97, 2.73_58, -0.28_54, 2.18_40, -1.71_83, 1.30_18, -2.48_71],
[-0.27_17, 3.61_73, 2.92_06, 2.19_88, 3.66_38, -0.38_58, 2.91_55, -2.29_80],
[-3.98_59, 2.15_80, 0.79_84, 4.49_04, 4.11_81, 2.02_52, 4.47_82, -1.12_53],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , UpperCAmelCase_ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , UpperCAmelCase_ , atol=self.tolerance )
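# The rotary test above checks TFRoFormerSelfAttention.apply_rotary_position_embeddings
# against hard-coded values. The NumPy sketch below illustrates the underlying
# "rotate every two channels" formulation; it is an illustration with assumed
# shapes and pairing convention, not the actual transformers implementation.
import numpy as np

def rotary_sinusoids(seq_len, dim, base=10000.0):
    # One frequency per (even, odd) channel pair, repeated so sin/cos line up
    # with the full hidden dimension.
    inv_freq = 1.0 / base ** (np.arange(0, dim, 2) / dim)
    angles = np.repeat(np.outer(np.arange(seq_len), inv_freq), 2, axis=-1)
    return np.sin(angles), np.cos(angles)

def apply_rotary(x, sin, cos):
    # Rotate each (x0, x1) channel pair by its position angle:
    # (x0, x1) -> (x0*cos - x1*sin, x1*cos + x0*sin).
    x_even, x_odd = x[..., 0::2], x[..., 1::2]
    rotated = np.stack([-x_odd, x_even], axis=-1).reshape(x.shape)
    return x * cos + rotated * sin

# e.g. for a (batch, heads, seq, head_dim) query tensor:
#   q = np.random.randn(2, 12, 16, 64)
#   sin, cos = rotary_sinusoids(16, 64)
#   q_rot = apply_rotary(q, sin, cos)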
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
__A : Optional[Any] = get_tests_dir('''fixtures/dummy-config.json''')
class __A ( unittest.TestCase ):
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : Tuple = 0
def lowercase__ ( self : Any ):
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec('transformers.models.auto' ) )
def lowercase__ ( self : Dict ):
lowerCAmelCase : Dict = AutoConfig.from_pretrained('bert-base-uncased' )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : str ):
lowerCAmelCase : List[Any] = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : str ):
lowerCAmelCase : Union[str, Any] = AutoConfig.for_model('roberta' )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : List[str] ):
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
lowerCAmelCase : Optional[int] = os.path.join(UpperCAmelCase_ , 'fake-roberta' )
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
with open(os.path.join(UpperCAmelCase_ , 'config.json' ) , 'w' ) as f:
f.write(json.dumps({} ) )
lowerCAmelCase : int = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertEqual(type(UpperCAmelCase_ ) , UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
try:
AutoConfig.register('custom' , UpperCAmelCase_ )
# Wrong model type will raise an error
with self.assertRaises(UpperCAmelCase_ ):
AutoConfig.register('model' , UpperCAmelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCAmelCase_ ):
AutoConfig.register('bert' , UpperCAmelCase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCAmelCase : Optional[int] = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def lowercase__ ( self : Tuple ):
with self.assertRaisesRegex(
UpperCAmelCase_ , 'bert-base is not a local folder and is not a valid model identifier' ):
lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained('bert-base' )
def lowercase__ ( self : Tuple ):
with self.assertRaisesRegex(
UpperCAmelCase_ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
lowerCAmelCase : int = AutoConfig.from_pretrained(UpperCAmelCase_ , revision='aaaaaa' )
def lowercase__ ( self : List[Any] ):
with self.assertRaisesRegex(
UpperCAmelCase_ , 'hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.' , ):
lowerCAmelCase : str = AutoConfig.from_pretrained('hf-internal-testing/no-config-test-repo' )
def lowercase__ ( self : str ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCAmelCase_ ):
lowerCAmelCase : int = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCAmelCase_ ):
lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=UpperCAmelCase_ )
lowerCAmelCase : Dict = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(config.__class__.__name__ , 'NewModelConfig' )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = AutoConfig.from_pretrained(UpperCAmelCase_ , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(reloaded_config.__class__.__name__ , 'NewModelConfig' )
def lowercase__ ( self : Optional[int] ):
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : Any = "new-model"
try:
AutoConfig.register('new-model' , UpperCAmelCase_ )
# If remote code is not set, the default is to use local
lowerCAmelCase : List[Any] = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' )
self.assertEqual(config.__class__.__name__ , 'NewModelConfigLocal' )
# If remote code is disabled, we load the local one.
lowerCAmelCase : List[Any] = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(config.__class__.__name__ , 'NewModelConfigLocal' )
# If remote is enabled, we load from the Hub
lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(config.__class__.__name__ , 'NewModelConfig' )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
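# Condensed, hedged sketch of the AutoConfig registration round trip the tests
# above exercise. Only public API already shown in the tests is used; the model
# type string and directory name are illustrative.
from transformers import AutoConfig, PretrainedConfig

class MyConfig(PretrainedConfig):
    model_type = "my-model"

AutoConfig.register("my-model", MyConfig)
my_config = MyConfig()
my_config.save_pretrained("./my-model")  # writes config.json with "model_type": "my-model"
assert isinstance(AutoConfig.from_pretrained("./my-model"), MyConfig)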
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __A ( unittest.TestCase ):
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : Tuple = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase : List[Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
lowerCAmelCase : str = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
lowerCAmelCase : Optional[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
lowerCAmelCase : Tuple = {'unk_token': '<unk>'}
lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(UpperCAmelCase_ ) )
lowerCAmelCase : Dict = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , UpperCAmelCase_ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : Any , **UpperCAmelCase_ : Dict ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowercase__ ( self : Tuple , **UpperCAmelCase_ : str ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowercase__ ( self : Optional[int] , **UpperCAmelCase_ : Optional[int] ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self : List[str] ):
lowerCAmelCase : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase : List[Any] = [Image.fromarray(np.moveaxis(UpperCAmelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase__ ( self : Any ):
lowerCAmelCase : List[str] = self.get_tokenizer()
lowerCAmelCase : List[str] = self.get_rust_tokenizer()
lowerCAmelCase : Optional[int] = self.get_image_processor()
lowerCAmelCase : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
lowerCAmelCase : int = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
lowerCAmelCase : Dict = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase_ )
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase_ )
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase_ )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Any = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase : Tuple = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowerCAmelCase : Union[str, Any] = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0 )
lowerCAmelCase : Dict = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase_ )
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Any = self.get_image_processor()
lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
lowerCAmelCase : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : Dict = self.prepare_image_inputs()
lowerCAmelCase : List[str] = image_processor(UpperCAmelCase_ , return_tensors='np' )
lowerCAmelCase : int = processor(images=UpperCAmelCase_ , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Union[str, Any] = self.get_image_processor()
lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
lowerCAmelCase : Dict = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = 'lower newer'
lowerCAmelCase : List[str] = processor(text=UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = tokenizer(UpperCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : Tuple = self.get_image_processor()
lowerCAmelCase : Dict = self.get_tokenizer()
lowerCAmelCase : List[str] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = 'lower newer'
lowerCAmelCase : Optional[int] = self.prepare_image_inputs()
lowerCAmelCase : Union[str, Any] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_ ):
processor()
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Optional[Any] = self.get_image_processor()
lowerCAmelCase : str = self.get_tokenizer()
lowerCAmelCase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase : Any = processor.batch_decode(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = tokenizer.batch_decode(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : List[Any] = self.get_image_processor()
lowerCAmelCase : Dict = self.get_tokenizer()
lowerCAmelCase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : Dict = 'lower newer'
lowerCAmelCase : Tuple = self.prepare_image_inputs()
lowerCAmelCase : List[str] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
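# End-to-end usage sketch for the processor pattern tested above: a single
# CLIPProcessor call tokenizes text and preprocesses images into one batch.
# The checkpoint name is OpenAI's public CLIP model; the printed keys match
# the model_input_names assertion in the last test.
from PIL import Image
import numpy as np
from transformers import CLIPProcessor

clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
example_image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
batch = clip_processor(text=["a photo of a cat"], images=example_image, return_tensors="np")
print(sorted(batch.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']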
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class __A :
def __init__( self : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any]=14 , UpperCAmelCase_ : int=7 , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : Union[str, Any]=99 , UpperCAmelCase_ : Union[str, Any]=32 , UpperCAmelCase_ : List[Any]=5 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : Optional[int]=37 , UpperCAmelCase_ : Tuple="gelu" , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Optional[Any]=512 , UpperCAmelCase_ : Dict=16 , UpperCAmelCase_ : Any=2 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : Optional[Any]=3 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : int=None , ):
lowerCAmelCase : List[Any] = parent
lowerCAmelCase : str = batch_size
lowerCAmelCase : List[Any] = seq_length
lowerCAmelCase : Optional[int] = is_training
lowerCAmelCase : List[Any] = use_token_type_ids
lowerCAmelCase : Any = use_input_mask
lowerCAmelCase : List[Any] = use_labels
lowerCAmelCase : List[Any] = use_mc_token_ids
lowerCAmelCase : List[Any] = vocab_size
lowerCAmelCase : List[Any] = hidden_size
lowerCAmelCase : Dict = num_hidden_layers
lowerCAmelCase : Union[str, Any] = num_attention_heads
lowerCAmelCase : List[Any] = intermediate_size
lowerCAmelCase : str = hidden_act
lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
lowerCAmelCase : List[str] = attention_probs_dropout_prob
lowerCAmelCase : Union[str, Any] = max_position_embeddings
lowerCAmelCase : List[str] = type_vocab_size
lowerCAmelCase : Optional[int] = type_sequence_label_size
lowerCAmelCase : Optional[Any] = initializer_range
lowerCAmelCase : Union[str, Any] = num_labels
lowerCAmelCase : Union[str, Any] = num_choices
lowerCAmelCase : List[str] = scope
lowerCAmelCase : List[str] = self.vocab_size - 1
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Optional[int] = None
if self.use_input_mask:
lowerCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : Optional[int] = None
if self.use_token_type_ids:
lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase : List[Any] = None
if self.use_mc_token_ids:
lowerCAmelCase : str = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : str = None
lowerCAmelCase : Optional[int] = None
if self.use_labels:
lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase : int = self.get_config()
lowerCAmelCase : Tuple = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowercase__ ( self : List[str] ):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def lowercase__ ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int , *UpperCAmelCase_ : List[Any] ):
lowerCAmelCase : Optional[Any] = CTRLModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , head_mask=UpperCAmelCase_ )
model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
lowerCAmelCase : Dict = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def lowercase__ ( self : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , *UpperCAmelCase_ : Tuple ):
lowerCAmelCase : List[Any] = CTRLLMHeadModel(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
lowerCAmelCase : Optional[int] = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Any ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
lowerCAmelCase : Optional[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
return config, inputs_dict
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , *UpperCAmelCase_ : Any ):
lowerCAmelCase : Union[str, Any] = self.num_labels
lowerCAmelCase : Union[str, Any] = CTRLForSequenceClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
lowerCAmelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class __A ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : Optional[int] = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
lowerCAmelCase_ : List[Any] = (CTRLLMHeadModel,) if is_torch_available() else ()
lowerCAmelCase_ : str = (
{
"feature-extraction": CTRLModel,
"text-classification": CTRLForSequenceClassification,
"text-generation": CTRLLMHeadModel,
"zero-shot": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase_ : Dict = True
lowerCAmelCase_ : Tuple = False
lowerCAmelCase_ : str = False
def lowercase__ ( self : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Union[str, Any] = CTRLModelTester(self )
lowerCAmelCase : str = ConfigTester(self , config_class=UpperCAmelCase_ , n_embd=37 )
def lowercase__ ( self : Tuple ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Optional[int] ):
self.config_tester.run_common_tests()
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*UpperCAmelCase_ )
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*UpperCAmelCase_ )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase__ ( self : Tuple ):
pass
@slow
def lowercase__ ( self : List[str] ):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : int = CTRLModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def lowercase__ ( self : List[str] ):
pass
@require_torch
class __A ( unittest.TestCase ):
def lowercase__ ( self : Dict ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowercase__ ( self : Any ):
lowerCAmelCase : str = CTRLLMHeadModel.from_pretrained('ctrl' )
model.to(UpperCAmelCase_ )
lowerCAmelCase : List[str] = torch.tensor(
[[11859, 0, 1611, 8]] , dtype=torch.long , device=UpperCAmelCase_ ) # Legal the president is
lowerCAmelCase : Dict = [
11859,
0,
1611,
8,
5,
150,
26449,
2,
19,
348,
469,
3,
2595,
48,
20740,
246533,
246533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
lowerCAmelCase : int = model.generate(UpperCAmelCase_ , do_sample=UpperCAmelCase_ )
self.assertListEqual(output_ids[0].tolist() , UpperCAmelCase_ )
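# The same generation, sketched with a tokenizer instead of hard-coded ids.
# CTRL expects a control code ("Legal", "Links", ...) at the start of the
# prompt; greedy decoding (do_sample=False) should reproduce the ids asserted
# above, though the exact continuation depends on the checkpoint.
import torch
from transformers import CTRLLMHeadModel, CTRLTokenizer

tokenizer = CTRLTokenizer.from_pretrained("ctrl")
model = CTRLLMHeadModel.from_pretrained("ctrl")
inputs = tokenizer("Legal the president is", return_tensors="pt")
output_ids = model.generate(inputs.input_ids, do_sample=False, max_length=20)
print(tokenizer.decode(output_ids[0]))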
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A : List[Any] = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = ['''XLMRobertaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = ['''XLMRobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Dict = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
__A : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
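# A minimal sketch of the lazy-import idea behind _LazyModule, written with the
# PEP 562 module-level __getattr__ hook instead of transformers' internal class.
# Illustrative only, not the actual _LazyModule code; the example structure
# below covers a single submodule.
import importlib as _importlib

_example_import_structure = {"tokenization_xlm_roberta": ["XLMRobertaTokenizer"]}
_name_to_module = {
    name: module
    for module, names in _example_import_structure.items()
    for name in names
}

def __getattr__(name):
    # Import the owning submodule only when one of its symbols is first used.
    if name in _name_to_module:
        module = _importlib.import_module("." + _name_to_module[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")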
from collections import defaultdict
def dfs(start: int) -> int:
    '''simple docstring'''
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret
def even_tree() -> None:
    '''simple docstring'''
    dfs(1)
if __name__ == "__main__":
    n, m = 10, 9  # node and edge counts for the sample tree below
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)  # 2 removable edges leave every component even-sized
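# Recursion-free variant of the same computation, useful when the tree is deep
# enough to hit Python's recursion limit. It discovers nodes from the root,
# then accumulates subtree sizes bottom-up; each non-root subtree of even size
# corresponds to one removable edge.
def count_even_cuts(adjacency, root=1):
    parent = {root: None}
    order = [root]
    for node in order:  # breadth-first discovery; parents appear before children
        for child in adjacency[node]:
            if child not in parent:
                parent[child] = node
                order.append(child)
    size = {node: 1 for node in order}
    for node in reversed(order[1:]):  # children are processed before their parents
        size[parent[node]] += size[node]
    return sum(1 for node in order[1:] if size[node] % 2 == 0)

# For the edge list above: count_even_cuts(tree) == len(cuts) - 1 == 2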
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : Union[List[PIL.Image.Image], np.ndarray]
lowerCAmelCase_ : Optional[List[bool]]
lowerCAmelCase_ : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
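# Hedged usage sketch (from user code) for the cascaded pipelines exported
# above. The checkpoint name is DeepFloyd's public stage-I model; the fp16
# variant, CPU offload, and prompt are illustrative choices, not requirements.
import torch
from diffusers import IFPipeline

stage_1 = IFPipeline.from_pretrained(
    "DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16
)
stage_1.enable_model_cpu_offload()
prompt_embeds, negative_embeds = stage_1.encode_prompt("a photo of a red panda")
image = stage_1(
    prompt_embeds=prompt_embeds,
    negative_prompt_embeds=negative_embeds,
    output_type="pt",
).images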
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
return x + 2
class __A ( unittest.TestCase ):
def lowercase__ ( self : int ):
lowerCAmelCase : List[str] = 'x = 3'
lowerCAmelCase : Optional[Any] = {}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
assert result == 3
self.assertDictEqual(UpperCAmelCase_ , {'x': 3} )
lowerCAmelCase : Dict = 'x = y'
lowerCAmelCase : List[Any] = {'y': 5}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 5, 'y': 5} )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : Any = 'y = add_two(x)'
lowerCAmelCase : int = {'x': 3}
lowerCAmelCase : Optional[int] = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 5} )
# Won't work without the tool
with CaptureStdout() as out:
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
assert result is None
assert "tried to execute add_two" in out.out
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Tuple = 'x = 3'
lowerCAmelCase : List[Any] = {}
lowerCAmelCase : Dict = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
assert result == 3
self.assertDictEqual(UpperCAmelCase_ , {'x': 3} )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : List[Any] = 'test_dict = {\'x\': x, \'y\': add_two(x)}'
lowerCAmelCase : Dict = {'x': 3}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 5} )
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'test_dict': {'x': 3, 'y': 5}} )
def lowercase__ ( self : Any ):
lowerCAmelCase : Union[str, Any] = 'x = 3\ny = 5'
lowerCAmelCase : str = {}
lowerCAmelCase : Optional[int] = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 5} )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Union[str, Any] = 'text = f\'This is x: {x}.\''
lowerCAmelCase : str = {'x': 3}
lowerCAmelCase : int = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'text': 'This is x: 3.'} )
def lowercase__ ( self : Dict ):
lowerCAmelCase : Optional[Any] = 'if x <= 3:\n y = 2\nelse:\n y = 5'
lowerCAmelCase : Dict = {'x': 3}
lowerCAmelCase : int = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 2} )
lowerCAmelCase : Any = {'x': 8}
lowerCAmelCase : Optional[int] = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 8, 'y': 5} )
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : int = 'test_list = [x, add_two(x)]'
lowerCAmelCase : Optional[Any] = {'x': 3}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , [3, 5] )
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'test_list': [3, 5]} )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : int = 'y = x'
lowerCAmelCase : Optional[int] = {'x': 3}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
assert result == 3
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 3} )
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Dict = 'test_list = [x, add_two(x)]\ntest_list[1]'
lowerCAmelCase : List[str] = {'x': 3}
lowerCAmelCase : List[str] = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'test_list': [3, 5]} )
lowerCAmelCase : Optional[Any] = 'test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'
lowerCAmelCase : List[Any] = {'x': 3}
lowerCAmelCase : Optional[Any] = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'test_dict': {'x': 3, 'y': 5}} )
def lowercase__ ( self : int ):
lowerCAmelCase : Any = 'x = 0\nfor i in range(3):\n x = i'
lowerCAmelCase : str = {}
lowerCAmelCase : Dict = evaluate(UpperCAmelCase_ , {'range': range} , state=UpperCAmelCase_ )
assert result == 2
self.assertDictEqual(UpperCAmelCase_ , {'x': 2, 'i': 2} )
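# A toy ast-based interpreter sketch showing the idea behind `evaluate`: parse
# the snippet, execute statements one by one against `state`, expose the tools
# as the only callables, and return the value of the last assignment or
# expression. This is a simplification for illustration, not the transformers
# python_interpreter implementation (which also handles if/for, f-strings,
# subscripts, and friendlier errors).
import ast

def _eval_expr(expr, scope):
    # Compile a single expression node with no builtins; names resolve only
    # through the supplied scope (tools plus accumulated state).
    return eval(compile(ast.Expression(body=expr), "<ast>", "eval"), {"__builtins__": {}}, scope)

def tiny_evaluate(code, tools, state):
    result = None
    for node in ast.parse(code).body:
        scope = {**tools, **state}
        if isinstance(node, ast.Assign):
            value = _eval_expr(node.value, scope)
            for target in node.targets:
                state[target.id] = value
            result = value
        elif isinstance(node, ast.Expr):
            result = _eval_expr(node.value, scope)
    return result

state = {}
assert tiny_evaluate("x = 3\ny = add_two(x)", {"add_two": lambda v: v + 2}, state) == 5
assert state == {"x": 3, "y": 5}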
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Dict:
'''simple docstring'''
lowerCAmelCase : Optional[int] = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
lowerCAmelCase : str = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
lowerCAmelCase : Any = 4
lowerCAmelCase : Tuple = 48
lowerCAmelCase : Any = 'pixelshuffle_aux'
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
lowerCAmelCase : List[str] = [6, 6, 6, 6]
lowerCAmelCase : Tuple = 60
lowerCAmelCase : Any = [6, 6, 6, 6]
lowerCAmelCase : Dict = 'pixelshuffledirect'
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
lowerCAmelCase : int = 4
lowerCAmelCase : Dict = 'nearest+conv'
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
lowerCAmelCase : str = 1
lowerCAmelCase : Any = 1
lowerCAmelCase : Tuple = 126
lowerCAmelCase : Tuple = 7
lowerCAmelCase : Optional[int] = 255.0
lowerCAmelCase : Any = ''
return config
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> Tuple:
'''simple docstring'''
if "patch_embed.proj" in name and "layers" not in name:
lowerCAmelCase : List[str] = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
lowerCAmelCase : Any = name.replace('patch_embed.norm', 'embeddings.patch_embeddings.layernorm' )
if "layers" in name:
lowerCAmelCase : Any = name.replace('layers', 'encoder.stages' )
if "residual_group.blocks" in name:
lowerCAmelCase : int = name.replace('residual_group.blocks', 'layers' )
if "attn.proj" in name:
lowerCAmelCase : str = name.replace('attn.proj', 'attention.output.dense' )
if "attn" in name:
lowerCAmelCase : str = name.replace('attn', 'attention.self' )
if "norm1" in name:
lowerCAmelCase : Optional[int] = name.replace('norm1', 'layernorm_before' )
if "norm2" in name:
lowerCAmelCase : Any = name.replace('norm2', 'layernorm_after' )
if "mlp.fc1" in name:
lowerCAmelCase : Any = name.replace('mlp.fc1', 'intermediate.dense' )
if "mlp.fc2" in name:
lowerCAmelCase : Tuple = name.replace('mlp.fc2', 'output.dense' )
if "q_bias" in name:
lowerCAmelCase : Any = name.replace('q_bias', 'query.bias' )
if "k_bias" in name:
lowerCAmelCase : Any = name.replace('k_bias', 'key.bias' )
if "v_bias" in name:
lowerCAmelCase : Dict = name.replace('v_bias', 'value.bias' )
if "cpb_mlp" in name:
lowerCAmelCase : Optional[Any] = name.replace('cpb_mlp', 'continuous_position_bias_mlp' )
if "patch_embed.proj" in name:
lowerCAmelCase : Tuple = name.replace('patch_embed.proj', 'patch_embed.projection' )
if name == "norm.weight":
lowerCAmelCase : Any = 'layernorm.weight'
if name == "norm.bias":
lowerCAmelCase : int = 'layernorm.bias'
if "conv_first" in name:
lowerCAmelCase : str = name.replace('conv_first', 'first_convolution' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
lowerCAmelCase : Any = name.replace('conv_last', 'final_convolution' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
lowerCAmelCase : int = name.replace('conv_before_upsample.0', 'conv_before_upsample' )
if "upsample.0" in name:
lowerCAmelCase : Optional[Any] = name.replace('upsample.0', 'upsample.convolution_0' )
if "upsample.2" in name:
lowerCAmelCase : Union[str, Any] = name.replace('upsample.2', 'upsample.convolution_1' )
lowerCAmelCase : str = 'upsample.' + name
elif config.upsampler == "pixelshuffledirect":
lowerCAmelCase : Dict = name.replace('upsample.0.weight', 'upsample.conv.weight' )
lowerCAmelCase : Optional[Any] = name.replace('upsample.0.bias', 'upsample.conv.bias' )
else:
pass
else:
lowerCAmelCase : List[Any] = 'swin2sr.' + name
return name
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> Any:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowerCAmelCase : int = orig_state_dict.pop(_UpperCAmelCase )
if "qkv" in key:
lowerCAmelCase : Optional[Any] = key.split('.' )
lowerCAmelCase : Optional[int] = int(key_split[1] )
lowerCAmelCase : Union[str, Any] = int(key_split[4] )
lowerCAmelCase : Optional[Any] = config.embed_dim
if "weight" in key:
lowerCAmelCase : List[Any] = val[:dim, :]
lowerCAmelCase : int = val[dim : dim * 2, :]
lowerCAmelCase : Union[str, Any] = val[-dim:, :]
else:
lowerCAmelCase : Dict = val[:dim]
lowerCAmelCase : Optional[Any] = val[dim : dim * 2]
lowerCAmelCase : int = val[-dim:]
pass
else:
lowerCAmelCase : int = val
return orig_state_dict
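# Minimal illustration of the fused-qkv split performed above: a single
# (3*dim, dim) in-projection weight is cut row-wise into query/key/value
# blocks of `dim` rows each, mirroring val[:dim], val[dim : dim * 2], and
# val[-dim:]. Wrapped in a helper so nothing runs at import time.
def _demo_qkv_split(dim: int = 8) -> None:
    qkv = torch.randn(3 * dim, dim)
    query_w, key_w, value_w = qkv[:dim], qkv[dim : 2 * dim], qkv[-dim:]
    # Re-concatenating the three blocks recovers the original fused weight.
    assert torch.equal(torch.cat([query_w, key_w, value_w], dim=0), qkv)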
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase : str = get_config(_UpperCAmelCase )
lowerCAmelCase : int = SwinaSRForImageSuperResolution(_UpperCAmelCase )
model.eval()
lowerCAmelCase : List[Any] = torch.hub.load_state_dict_from_url(_UpperCAmelCase, map_location='cpu' )
lowerCAmelCase : List[Any] = convert_state_dict(_UpperCAmelCase, _UpperCAmelCase )
    missing_keys , unexpected_keys = model.load_state_dict(_UpperCAmelCase, strict=_UpperCAmelCase )
    if len(missing_keys ) > 0:
        raise ValueError('Missing keys when converting: {}'.format(missing_keys ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f"Unexpected key {key} in state_dict" )
# verify values
lowerCAmelCase : Optional[int] = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
lowerCAmelCase : List[str] = Image.open(requests.get(_UpperCAmelCase, stream=_UpperCAmelCase ).raw ).convert('RGB' )
lowerCAmelCase : List[Any] = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
lowerCAmelCase : List[Any] = 126 if 'Jpeg' in checkpoint_url else 256
lowerCAmelCase : Union[str, Any] = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6], std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
lowerCAmelCase : Any = transforms(_UpperCAmelCase ).unsqueeze(0 )
if config.num_channels == 1:
lowerCAmelCase : Union[str, Any] = pixel_values[:, 0, :, :].unsqueeze(1 )
lowerCAmelCase : str = model(_UpperCAmelCase )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
lowerCAmelCase : List[Any] = torch.Size([1, 3, 512, 512] )
lowerCAmelCase : str = torch.tensor(
[[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
lowerCAmelCase : Optional[int] = torch.Size([1, 3, 1_024, 1_024] )
lowerCAmelCase : List[str] = torch.tensor(
[[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
lowerCAmelCase : int = torch.Size([1, 3, 1_024, 1_024] )
lowerCAmelCase : Union[str, Any] = torch.tensor(
[[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
lowerCAmelCase : Tuple = torch.Size([1, 3, 512, 512] )
lowerCAmelCase : Optional[Any] = torch.tensor(
[[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], [-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
lowerCAmelCase : int = torch.Size([1, 3, 1_024, 1_024] )
lowerCAmelCase : Optional[Any] = torch.tensor(
[[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] )
assert (
outputs.reconstruction.shape == expected_shape
), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], _UpperCAmelCase, atol=1e-3 )
print('Looks ok!' )
lowerCAmelCase : Any = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
lowerCAmelCase : Optional[Any] = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_UpperCAmelCase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(_UpperCAmelCase )
if push_to_hub:
model.push_to_hub(f"caidas/{model_name}" )
processor.push_to_hub(f"caidas/{model_name}" )
if __name__ == "__main__":
__A : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
__A : List[Any] = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 352
|
from math import pi, sqrt, tan
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
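# Worked example (matching the demo call at the bottom of this module): for
# radius = 10 and height = 20 the slant height is sqrt(10**2 + 20**2) ≈ 22.3607,
# so the total surface area is pi * 10 * (10 + 22.3607) ≈ 1016.64.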
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
lowerCAmelCase : Optional[int] = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
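# Worked example (matching the demo call at the bottom of this module): with radii
# 10 and 20 and height 30 the slant height is sqrt(30**2 + (20 - 10)**2) ≈ 31.6228,
# so the area is pi * (31.6228 * (10 + 20) + 10**2 + 20**2) ≈ 4551.17.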
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
return 4 * pow(_UpperCAmelCase, 2 ) * torus_radius * tube_radius
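# Worked example (matching the demo call at the bottom of this module): for
# torus_radius = 20 and tube_radius = 10 the area is 4 * pi**2 * 20 * 10 ≈ 7895.68.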
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('Given three sides do not form a triangle' )
lowerCAmelCase : Optional[Any] = (sidea + sidea + sidea) / 2
lowerCAmelCase : Any = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
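# Worked example (Heron's formula, matching the demo call below): for the 5-12-13
# right triangle the semi-perimeter is s = 15, so the area is
# sqrt(15 * 10 * 3 * 2) = sqrt(900) = 30.0.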
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if basea < 0 or basea < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (basea + basea) * height
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_a * diagonal_a
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if not isinstance(_UpperCAmelCase, _UpperCAmelCase ) or sides < 3:
raise ValueError(
            'area_reg_polygon() only accepts integers greater than or '
            'equal to three as number of sides' )
elif length < 0:
raise ValueError(
            'area_reg_polygon() only accepts non-negative values as '
            'length of a side' )
    return (sides * length**2) / (4 * tan(pi / sides ))
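# Worked example (matching the demo call below): an equilateral triangle with side 10
# has area (3 * 10**2) / (4 * tan(pi / 3)) ≈ 43.301, which agrees with the closed
# form sqrt(3) / 4 * 10**2.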
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(F'Rectangle: {area_rectangle(10, 20) = }')
print(F'Square: {area_square(10) = }')
print(F'Triangle: {area_triangle(10, 10) = }')
print(F'Triangle: {area_triangle_three_sides(5, 12, 13) = }')
print(F'Parallelogram: {area_parallelogram(10, 20) = }')
print(F'Rhombus: {area_rhombus(10, 20) = }')
print(F'Trapezium: {area_trapezium(10, 20, 30) = }')
print(F'Circle: {area_circle(20) = }')
print(F'Ellipse: {area_ellipse(10, 20) = }')
print('''\nSurface Areas of various geometric shapes: \n''')
print(F'Cube: {surface_area_cube(20) = }')
print(F'Cuboid: {surface_area_cuboid(10, 20, 30) = }')
print(F'Sphere: {surface_area_sphere(20) = }')
print(F'Hemisphere: {surface_area_hemisphere(20) = }')
print(F'Cone: {surface_area_cone(10, 20) = }')
print(F'Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }')
print(F'Cylinder: {surface_area_cylinder(10, 20) = }')
print(F'Torus: {surface_area_torus(20, 10) = }')
print(F'Equilateral Triangle: {area_reg_polygon(3, 10) = }')
print(F'Square: {area_reg_polygon(4, 10) = }')
    print(F'Regular Pentagon: {area_reg_polygon(5, 10) = }')
| 323
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : Tuple = logging.get_logger(__name__)
__A : str = {
'''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''',
'''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : List[Any] = "xlm-roberta-xl"
def __init__( self : Tuple , UpperCAmelCase_ : Union[str, Any]=250880 , UpperCAmelCase_ : Any=2560 , UpperCAmelCase_ : Optional[Any]=36 , UpperCAmelCase_ : Optional[int]=32 , UpperCAmelCase_ : int=10240 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : int=514 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : List[Any]=0.02 , UpperCAmelCase_ : List[str]=1E-05 , UpperCAmelCase_ : Tuple=1 , UpperCAmelCase_ : Optional[Any]=0 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Optional[int]="absolute" , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Optional[Any]=None , **UpperCAmelCase_ : str , ):
super().__init__(pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = vocab_size
lowerCAmelCase : Tuple = hidden_size
lowerCAmelCase : str = num_hidden_layers
lowerCAmelCase : Optional[Any] = num_attention_heads
lowerCAmelCase : int = hidden_act
lowerCAmelCase : List[Any] = intermediate_size
lowerCAmelCase : str = hidden_dropout_prob
lowerCAmelCase : str = attention_probs_dropout_prob
lowerCAmelCase : int = max_position_embeddings
lowerCAmelCase : Optional[Any] = type_vocab_size
lowerCAmelCase : Optional[int] = initializer_range
lowerCAmelCase : List[str] = layer_norm_eps
lowerCAmelCase : Optional[Any] = position_embedding_type
lowerCAmelCase : Tuple = use_cache
lowerCAmelCase : Tuple = classifier_dropout
class __A ( lowerCAmelCase ):
@property
def lowercase__ ( self : Dict ):
if self.task == "multiple-choice":
lowerCAmelCase : Union[str, Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase : int = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 353
|
from __future__ import annotations
from typing import Any
class __A :
def __init__( self : Optional[Any] , UpperCAmelCase_ : int ):
lowerCAmelCase : Tuple = num_of_nodes
lowerCAmelCase : list[list[int]] = []
lowerCAmelCase : dict[int, int] = {}
def lowercase__ ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
self.m_edges.append([u_node, v_node, weight] )
def lowercase__ ( self : Dict , UpperCAmelCase_ : int ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def lowercase__ ( self : Optional[int] , UpperCAmelCase_ : int ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
lowerCAmelCase : Dict = self.find_component(UpperCAmelCase_ )
def lowercase__ ( self : List[str] , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
if component_size[u_node] <= component_size[v_node]:
lowerCAmelCase : Optional[int] = v_node
component_size[v_node] += component_size[u_node]
self.set_component(UpperCAmelCase_ )
elif component_size[u_node] >= component_size[v_node]:
lowerCAmelCase : Union[str, Any] = self.find_component(UpperCAmelCase_ )
component_size[u_node] += component_size[v_node]
self.set_component(UpperCAmelCase_ )
def lowercase__ ( self : str ):
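        # Borůvka-style MST construction: on every pass, record the cheapest edge
        # leaving each component, merge the components joined by those edges, and
        # repeat until a single component remains.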
lowerCAmelCase : str = []
lowerCAmelCase : Tuple = 0
lowerCAmelCase : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
lowerCAmelCase : int = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Union[str, Any] = edge
lowerCAmelCase : Optional[int] = self.m_component[u]
lowerCAmelCase : str = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
lowerCAmelCase : str = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Any = edge
lowerCAmelCase : Optional[Any] = self.m_component[u]
lowerCAmelCase : Optional[Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n" )
num_of_components -= 1
lowerCAmelCase : Optional[Any] = [-1] * self.m_num_of_nodes
print(f"The total weight of the minimal spanning tree is: {mst_weight}" )
def SCREAMING_SNAKE_CASE__ ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 323
| 0
|
from math import pi, sqrt, tan
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
lowerCAmelCase : Optional[int] = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
return 4 * pow(_UpperCAmelCase, 2 ) * torus_radius * tube_radius
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('Given three sides do not form a triangle' )
lowerCAmelCase : Optional[Any] = (sidea + sidea + sidea) / 2
lowerCAmelCase : Any = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if basea < 0 or basea < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (basea + basea) * height
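# Worked example (matching the demo call at the bottom of this module): parallel
# sides 10 and 20 with height 30 give (10 + 20) / 2 * 30 = 450.0.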
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
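# Worked example (matching the demo call below): semi-axes 10 and 20 give
# pi * 10 * 20 ≈ 628.32; when the two radii are equal this reduces to the circle formula.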
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_a * diagonal_a
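# Worked example (matching the demo call below): diagonals 10 and 20 give
# 1 / 2 * 10 * 20 = 100.0.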
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
if not isinstance(_UpperCAmelCase, _UpperCAmelCase ) or sides < 3:
raise ValueError(
            'area_reg_polygon() only accepts integers greater than or '
            'equal to three as number of sides' )
elif length < 0:
raise ValueError(
            'area_reg_polygon() only accepts non-negative values as '
            'length of a side' )
    return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(f'Rectangle: {area_rectangle(10, 20) = }')
print(f'Square: {area_square(10) = }')
print(f'Triangle: {area_triangle(10, 10) = }')
print(f'Triangle: {area_triangle_three_sides(5, 12, 13) = }')
print(f'Parallelogram: {area_parallelogram(10, 20) = }')
print(f'Rhombus: {area_rhombus(10, 20) = }')
print(f'Trapezium: {area_trapezium(10, 20, 30) = }')
print(f'Circle: {area_circle(20) = }')
print(f'Ellipse: {area_ellipse(10, 20) = }')
print('''\nSurface Areas of various geometric shapes: \n''')
print(f'Cube: {surface_area_cube(20) = }')
print(f'Cuboid: {surface_area_cuboid(10, 20, 30) = }')
print(f'Sphere: {surface_area_sphere(20) = }')
print(f'Hemisphere: {surface_area_hemisphere(20) = }')
print(f'Cone: {surface_area_cone(10, 20) = }')
print(f'Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }')
print(f'Cylinder: {surface_area_cylinder(10, 20) = }')
print(f'Torus: {surface_area_torus(20, 10) = }')
print(f'Equilateral Triangle: {area_reg_polygon(3, 10) = }')
print(f'Square: {area_reg_polygon(4, 10) = }')
    print(f'Regular Pentagon: {area_reg_polygon(5, 10) = }')
| 354
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : List[Any] = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[str] = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
__A : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
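    # A minimal sketch of the deferred-import behaviour (assuming this file is the
    # package's `__init__.py`): importing the package itself is cheap, and the
    # torch-backed classes declared in the import structure are only materialised
    # the first time an attribute such as `AutoformerModel` is looked up on the
    # lazy module.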
| 323
| 0
|
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 355
|
import math
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase = 100 ) -> int:
'''simple docstring'''
lowerCAmelCase : Any = sum(i * i for i in range(1, n + 1 ) )
lowerCAmelCase : str = int(math.pow(sum(range(1, n + 1 ) ), 2 ) )
return square_of_sum - sum_of_squares
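# Worked example: for n = 10 the square of the sum is 55**2 = 3025 and the sum of
# the squares is 385, so the difference is 2640; for the default n = 100 the
# function returns 25164150.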
if __name__ == "__main__":
print(F'{solution() = }')
| 323
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
__A : Optional[int] = {
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[str] = [
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
__A : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 356
|
from collections.abc import Sequence
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
return sum(c * (x**i) for i, c in enumerate(_UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> float:
'''simple docstring'''
lowerCAmelCase : Optional[int] = 0.0
for coeff in reversed(_UpperCAmelCase ):
lowerCAmelCase : Union[str, Any] = result * x + coeff
return result
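# Worked example for the demo values below: with poly = (0.0, 0.0, 5.0, 9.3, 7.0)
# (coefficients in increasing order of degree) and x = 10.0, Horner's scheme folds
# 7.0 -> 79.3 -> 798.0 -> 7980.0 -> 79800.0, matching evaluate_poly's
# 5 * 10**2 + 9.3 * 10**3 + 7 * 10**4 = 79800.0 while using only n multiplications.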
if __name__ == "__main__":
__A : Optional[int] = (0.0, 0.0, 5.0, 9.3, 7.0)
__A : str = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 323
| 0
|
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
__A : Union[str, Any] = '''bart'''
__A : Optional[int] = True
@st.cache(allow_output_mutation=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( ) -> int:
'''simple docstring'''
if LOAD_DENSE_INDEX:
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' )
lowerCAmelCase : List[str] = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' )
lowerCAmelCase : Union[str, Any] = qar_model.eval()
else:
lowerCAmelCase : Union[str, Any] = (None, None)
if MODEL_TYPE == "bart":
lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained('yjernite/bart_eli5' )
lowerCAmelCase : List[Any] = AutoModelForSeqaSeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' )
lowerCAmelCase : Optional[Any] = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' )
sas_model.load_state_dict(save_dict['model'] )
lowerCAmelCase : List[Any] = sas_model.eval()
else:
lowerCAmelCase : str = make_qa_sas_model(
model_name='t5-small', from_file='seq2seq_models/eli5_t5_model_1024_4.pth', device='cuda:0' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]:
'''simple docstring'''
if LOAD_DENSE_INDEX:
lowerCAmelCase : Optional[Any] = faiss.StandardGpuResources()
lowerCAmelCase : List[Any] = datasets.load_dataset(path='wiki_snippets', name='wiki40b_en_100_0' )['train']
lowerCAmelCase : Optional[int] = np.memmap(
'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat', dtype='float32', mode='r', shape=(wikiaab_passages.num_rows, 128), )
lowerCAmelCase : List[Any] = faiss.IndexFlatIP(128 )
lowerCAmelCase : Tuple = faiss.index_cpu_to_gpu(_UpperCAmelCase, 1, _UpperCAmelCase )
wikiaab_gpu_index_flat.add(_UpperCAmelCase ) # TODO fix for larger GPU
else:
lowerCAmelCase : Dict = (None, None)
lowerCAmelCase : Any = Elasticsearch([{'host': 'localhost', 'port': '9200'}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase : Optional[Any] = datasets.load_dataset('eli5', name='LFQA_reddit' )
lowerCAmelCase : Optional[Any] = elia['train_eli5']
lowerCAmelCase : Optional[int] = np.memmap(
'eli5_questions_reps.dat', dtype='float32', mode='r', shape=(elia_train.num_rows, 128) )
lowerCAmelCase : Optional[int] = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_UpperCAmelCase )
return (elia_train, eli5_train_q_index)
__A : Any = load_indexes()
__A : Optional[int] = load_models()
__A : List[Any] = load_train_data()
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase=10 ) -> Tuple:
'''simple docstring'''
lowerCAmelCase : List[str] = embed_questions_for_retrieval([question], _UpperCAmelCase, _UpperCAmelCase )
lowerCAmelCase : List[Any] = eli5_train_q_index.search(_UpperCAmelCase, _UpperCAmelCase )
lowerCAmelCase : Tuple = [elia_train[int(_UpperCAmelCase )] for i in I[0]]
return nn_examples
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase="wiki40b", _UpperCAmelCase="dense", _UpperCAmelCase=10 ) -> str:
'''simple docstring'''
if source == "none":
lowerCAmelCase : str = (' <P> '.join(['' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
lowerCAmelCase : Optional[int] = query_qa_dense_index(
_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
else:
lowerCAmelCase : str = query_es_index(
_UpperCAmelCase, _UpperCAmelCase, index_name='english_wiki40b_snippets_100w', n_results=_UpperCAmelCase, )
lowerCAmelCase : Union[str, Any] = [
(res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
]
lowerCAmelCase : Dict = 'question: {} context: {}'.format(_UpperCAmelCase, _UpperCAmelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _UpperCAmelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _UpperCAmelCase : None),
} )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=64, _UpperCAmelCase=256, _UpperCAmelCase=False, _UpperCAmelCase=2, _UpperCAmelCase=0.9_5, _UpperCAmelCase=0.8 ) -> Optional[Any]:
'''simple docstring'''
with torch.no_grad():
lowerCAmelCase : Tuple = qa_sas_generate(
_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, num_answers=1, num_beams=_UpperCAmelCase, min_len=_UpperCAmelCase, max_len=_UpperCAmelCase, do_sample=_UpperCAmelCase, temp=_UpperCAmelCase, top_p=_UpperCAmelCase, top_k=_UpperCAmelCase, max_input_length=1_024, device='cuda:0', )[0]
return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
__A : int = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
__A : str = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__A : int = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
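# A hypothetical sketch of the dense-retrieval path described above (it mirrors the
# nearest-question helper defined earlier in this file): the query is embedded into a
# 128-d vector and the FAISS index is searched by maximum inner product. The variable
# names here are illustrative only:
#   q_rep = embed_questions_for_retrieval([question], tokenizer, model)  # shape (1, 128)
#   scores, ids = gpu_index_flat.search(q_rep, n_results)                # top-k by inner product
#   passages = [passages_dataset[int(i)] for i in ids[0]]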
st.sidebar.markdown(description, unsafe_allow_html=True)
__A : Union[str, Any] = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
__A : int = st.sidebar.checkbox('''Demo options''')
if demo_options:
__A : Any = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
__A : Optional[int] = action_list.index(action_st)
__A : Optional[Any] = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
__A : Tuple = show_type == '''Show full text of passages'''
else:
__A : Dict = 3
__A : Dict = True
__A : Optional[int] = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
__A : Tuple = '''
### Information retriever options
    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between question and passage embeddings
    trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
    The answer is then generated by a sequence-to-sequence model which takes the question and the retrieved documents as input.
'''
st.sidebar.markdown(retriever_info)
__A : Optional[int] = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
__A : Any = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
__A : Any = '''wiki40b'''
__A : Optional[int] = '''dense'''
__A : Union[str, Any] = '''beam'''
__A : Optional[Any] = 2
__A : Union[str, Any] = 64
__A : Dict = 256
__A : Dict = None
__A : Optional[Any] = None
__A : List[str] = st.sidebar.checkbox('''Generation options''')
if generate_options:
__A : Tuple = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can decode deterministically with
    **beam** search, or **sample** from the decoder\'s output probabilities.
'''
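    # Both modes ultimately map onto the standard Hugging Face `generate()` call; a
    # hypothetical sketch using the sidebar values bound below:
    #   model.generate(input_ids, num_beams=n_beams, min_length=min_len, max_length=max_len)  # beam search
    #   model.generate(input_ids, do_sample=True, top_p=top_p, temperature=temp)              # nucleus sampling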
st.sidebar.markdown(generate_info)
__A : str = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
__A : List[Any] = st.sidebar.slider(
'''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
__A : str = st.sidebar.slider(
'''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
__A : Optional[Any] = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__A : Tuple = st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
__A : Any = st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
__A : List[str] = None
# start main text
__A : int = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
__A : List[str] = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__A : Tuple = st.text_input('''Enter your question here:''', '''''')
else:
__A : List[str] = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
__A : Optional[Any] = make_support(question, source=wiki_source, method='''dense''', n_results=10)
__A : Tuple = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
__A : Optional[int] = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__A : Optional[Any] = support_list[:10]
__A : Union[str, Any] = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
__A : Optional[int] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__A : str = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
__A : Any = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
__A : Any = res[1].strip()
if sec_titles == "":
__A : Any = '''[{}]({})'''.format(res[0], wiki_url)
else:
__A : Tuple = sec_titles.split(''' & ''')
__A : Tuple = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
__A : Tuple = find_nearest_training(question)
__A : str = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
__A : Tuple = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
__A : str = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 357
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __A ( unittest.TestCase ):
def __init__( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int]=7 , UpperCAmelCase_ : Tuple=3 , UpperCAmelCase_ : int=18 , UpperCAmelCase_ : List[str]=30 , UpperCAmelCase_ : str=400 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Union[str, Any]=None , ):
lowerCAmelCase : Any = size if size is not None else {'shortest_edge': 20}
lowerCAmelCase : str = crop_size if crop_size is not None else {'height': 18, 'width': 18}
lowerCAmelCase : List[Any] = parent
lowerCAmelCase : Optional[Any] = batch_size
lowerCAmelCase : int = num_channels
lowerCAmelCase : int = image_size
lowerCAmelCase : Tuple = min_resolution
lowerCAmelCase : Any = max_resolution
lowerCAmelCase : int = do_resize
lowerCAmelCase : Dict = size
lowerCAmelCase : int = do_center_crop
lowerCAmelCase : str = crop_size
def lowercase__ ( self : Optional[int] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __A ( lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : Optional[Any] = MobileNetVaImageProcessor if is_vision_available() else None
def lowercase__ ( self : int ):
lowerCAmelCase : List[str] = MobileNetVaImageProcessingTester(self )
@property
def lowercase__ ( self : int ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Dict ):
lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ , 'do_resize' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'size' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'do_center_crop' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'crop_size' ) )
def lowercase__ ( self : int ):
lowerCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
lowerCAmelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def lowercase__ ( self : str ):
pass
def lowercase__ ( self : List[str] ):
# Initialize image_processing
lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image )
# Test not batched input
lowerCAmelCase : str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase : Dict = image_processing(UpperCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowercase__ ( self : Optional[Any] ):
# Initialize image_processing
lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
# Test not batched input
lowerCAmelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase : Optional[int] = image_processing(UpperCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowercase__ ( self : Dict ):
# Initialize image_processing
lowerCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
# Test not batched input
lowerCAmelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase : List[str] = image_processing(UpperCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 323
| 0
|
from manim import *
class __A ( lowerCAmelCase ):
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Dict = Rectangle(height=0.5 , width=0.5 )
lowerCAmelCase : Any = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCAmelCase : List[str] = Rectangle(height=0.25 , width=0.25 )
lowerCAmelCase : List[Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase : Tuple = [mem.copy() for i in range(6 )]
lowerCAmelCase : int = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : Dict = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : int = VGroup(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : str = Text('CPU' , font_size=24 )
lowerCAmelCase : Union[str, Any] = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCAmelCase_ )
lowerCAmelCase : int = [mem.copy() for i in range(4 )]
lowerCAmelCase : Union[str, Any] = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : int = Text('GPU' , font_size=24 )
lowerCAmelCase : Tuple = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
gpu.move_to([-1, -1, 0] )
self.add(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase : Tuple = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : List[str] = Text('Model' , font_size=24 )
lowerCAmelCase : Union[str, Any] = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
model.move_to([3, -1.0, 0] )
self.add(UpperCAmelCase_ )
lowerCAmelCase : Any = []
lowerCAmelCase : Dict = []
for i, rect in enumerate(UpperCAmelCase_ ):
lowerCAmelCase : Optional[Any] = fill.copy().set_fill(UpperCAmelCase_ , opacity=0.8 )
target.move_to(UpperCAmelCase_ )
model_arr.append(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(UpperCAmelCase_ )
self.add(*UpperCAmelCase_ , *UpperCAmelCase_ )
lowerCAmelCase : Dict = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase : Union[str, Any] = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase : Tuple = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : int = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : Tuple = VGroup(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : Union[str, Any] = Text('Disk' , font_size=24 )
lowerCAmelCase : Optional[Any] = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
disk.move_to([-4, -1.25, 0] )
self.add(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : List[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase : Optional[int] = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : Dict = MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(UpperCAmelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(UpperCAmelCase_ )
lowerCAmelCase : str = MarkupText(
f"Now watch as an input is passed through the model\nand how the memory is utilized and handled." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase_ ) )
lowerCAmelCase : Optional[Any] = Square(0.3 )
input.set_fill(UpperCAmelCase_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , UpperCAmelCase_ , buff=0.5 )
self.play(Write(UpperCAmelCase_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=UpperCAmelCase_ , buff=0.02 )
self.play(MoveToTarget(UpperCAmelCase_ ) )
self.play(FadeOut(UpperCAmelCase_ ) )
lowerCAmelCase : List[Any] = Arrow(start=UpperCAmelCase_ , end=UpperCAmelCase_ , color=UpperCAmelCase_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , UpperCAmelCase_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
lowerCAmelCase : int = MarkupText(
f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase_ , run_time=3 ) )
lowerCAmelCase : Optional[Any] = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(UpperCAmelCase_ ) , Circumscribe(model_arr[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(model_cpu_arr[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
lowerCAmelCase : Any = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , UpperCAmelCase_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
lowerCAmelCase : int = AnimationGroup(
FadeOut(UpperCAmelCase_ , run_time=0.5 ) , MoveToTarget(UpperCAmelCase_ , run_time=0.5 ) , FadeIn(UpperCAmelCase_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(UpperCAmelCase_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
lowerCAmelCase : List[str] = 0.7
self.play(
Circumscribe(model_arr[i] , **UpperCAmelCase_ ) , Circumscribe(cpu_left_col_base[i] , **UpperCAmelCase_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(model_arr[i + 1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(cpu_left_col_base[-1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
lowerCAmelCase : int = a_c
lowerCAmelCase : Union[str, Any] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(UpperCAmelCase_ ) , FadeOut(UpperCAmelCase_ , run_time=0.5 ) , )
lowerCAmelCase : int = MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase_ , run_time=3 ) , MoveToTarget(UpperCAmelCase_ ) )
self.wait()
| 358
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase=False ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('module.cls_token', 'vit.embeddings.cls_token'),
('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('module.pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('module.norm.weight', 'layernorm.weight'),
('module.norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCAmelCase : Any = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=False ) -> Dict:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
lowerCAmelCase : Optional[Any] = ''
else:
lowerCAmelCase : Optional[Any] = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase : Union[str, Any] = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight" )
lowerCAmelCase : List[Any] = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase : str = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase : int = in_proj_bias[: config.hidden_size]
lowerCAmelCase : Dict = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase : Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase : str = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase : Any = in_proj_bias[-config.hidden_size :]
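        # Note: the original `qkv` projection is a single (3 * hidden_size, hidden_size)
        # matrix; the three slices above split it into separate query, key and value
        # weights of shape (hidden_size, hidden_size) each, matching the HF attention layout.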
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Any:
'''simple docstring'''
lowerCAmelCase : Optional[int] = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(_UpperCAmelCase, _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> str:
'''simple docstring'''
lowerCAmelCase : Optional[int] = [
'module.fc.fc1.weight',
'module.fc.fc1.bias',
'module.fc.bn1.weight',
'module.fc.bn1.bias',
'module.fc.bn1.running_mean',
'module.fc.bn1.running_var',
'module.fc.bn1.num_batches_tracked',
'module.fc.fc2.weight',
'module.fc.fc2.bias',
'module.fc.bn2.weight',
'module.fc.bn2.bias',
'module.fc.bn2.running_mean',
'module.fc.bn2.running_var',
'module.fc.bn2.num_batches_tracked',
'module.fc.fc3.weight',
'module.fc.fc3.bias',
]
for k in ignore_keys:
state_dict.pop(_UpperCAmelCase, _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowerCAmelCase : List[str] = dct.pop(_UpperCAmelCase )
lowerCAmelCase : Dict = val
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> Tuple:
'''simple docstring'''
lowerCAmelCase : str = ViTMSNConfig()
lowerCAmelCase : str = 1_000
lowerCAmelCase : List[str] = 'datasets/huggingface/label-files'
lowerCAmelCase : int = 'imagenet-1k-id2label.json'
lowerCAmelCase : List[Any] = json.load(open(hf_hub_download(_UpperCAmelCase, _UpperCAmelCase ), 'r' ) )
lowerCAmelCase : Optional[Any] = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
lowerCAmelCase : List[str] = idalabel
lowerCAmelCase : List[Any] = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
lowerCAmelCase : Optional[Any] = 384
lowerCAmelCase : List[Any] = 1_536
lowerCAmelCase : Union[str, Any] = 6
elif "l16" in checkpoint_url:
lowerCAmelCase : List[Any] = 1_024
lowerCAmelCase : Any = 4_096
lowerCAmelCase : str = 24
lowerCAmelCase : Optional[int] = 16
lowerCAmelCase : Any = 0.1
elif "b4" in checkpoint_url:
lowerCAmelCase : Any = 4
elif "l7" in checkpoint_url:
lowerCAmelCase : int = 7
lowerCAmelCase : str = 1_024
lowerCAmelCase : Tuple = 4_096
lowerCAmelCase : str = 24
lowerCAmelCase : Tuple = 16
lowerCAmelCase : Dict = 0.1
lowerCAmelCase : List[str] = ViTMSNModel(_UpperCAmelCase )
lowerCAmelCase : int = torch.hub.load_state_dict_from_url(_UpperCAmelCase, map_location='cpu' )['target_encoder']
lowerCAmelCase : int = ViTImageProcessor(size=config.image_size )
remove_projection_head(_UpperCAmelCase )
lowerCAmelCase : Tuple = create_rename_keys(_UpperCAmelCase, base_model=_UpperCAmelCase )
for src, dest in rename_keys:
rename_key(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
read_in_q_k_v(_UpperCAmelCase, _UpperCAmelCase, base_model=_UpperCAmelCase )
model.load_state_dict(_UpperCAmelCase )
model.eval()
lowerCAmelCase : Optional[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCAmelCase : Dict = Image.open(requests.get(_UpperCAmelCase, stream=_UpperCAmelCase ).raw )
lowerCAmelCase : Any = ViTImageProcessor(
size=config.image_size, image_mean=_UpperCAmelCase, image_std=_UpperCAmelCase )
lowerCAmelCase : List[Any] = image_processor(images=_UpperCAmelCase, return_tensors='pt' )
# forward pass
torch.manual_seed(2 )
lowerCAmelCase : Union[str, Any] = model(**_UpperCAmelCase )
lowerCAmelCase : List[str] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowerCAmelCase : Optional[int] = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
elif "b16" in checkpoint_url:
lowerCAmelCase : int = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
elif "l16" in checkpoint_url:
lowerCAmelCase : Union[str, Any] = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
elif "b4" in checkpoint_url:
lowerCAmelCase : int = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
else:
lowerCAmelCase : Union[str, Any] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3], _UpperCAmelCase, atol=1e-4 )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(_UpperCAmelCase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
__A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__A : List[str] = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 323
| 0
|
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = 'ylacombe/bark-small'
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = 'en_speaker_1'
        self.input_string = 'This is a test string'
        self.speaker_embeddings_dict_path = 'speaker_embeddings_path.json'
        self.speaker_embeddings_directory = 'speaker_embeddings'

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token='(BOS)',
            eos_token='(EOS)',
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            'semantic_prompt': np.ones(seq_len),
            'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len)),
            'fine_prompt': np.ones((nb_codebooks_total, seq_len)),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs['history_prompt']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, 'file.npz')
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs['history_prompt']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        encoded_processor = processor(text=self.input_string)
        # these keyword values mirror what BarkProcessor passes to its tokenizer
        encoded_tok = tokenizer(
            self.input_string,
            padding='max_length',
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
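# The suite above can be run with pytest; the file path below is an assumption,
# not taken from this file:
#   python -m pytest tests/models/bark/test_processor_bark.py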
def hamming_distance(string1: str, string2: str) -> int:
    """Return the number of positions at which two equal-length strings differ."""
    if len(string1) != len(string2):
        raise ValueError('String lengths must match!')
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
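    # Illustrative check (not in the original file): 'karolin' and 'kathrin'
    # differ at three positions, so the distance is 3.
    assert hamming_distance('karolin', 'kathrin') == 3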
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    """Combines a Pix2Struct image processor and a T5 tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(self, images=None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, max_patches: Optional[int] = 2048, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError('You have to specify either images or text.')
        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs)
            return text_encoding
        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs)
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs)
        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs)
            if 'attention_mask' in text_encoding:
                text_encoding['decoder_attention_mask'] = text_encoding.pop('attention_mask')
            if 'input_ids' in text_encoding:
                text_encoding['decoder_input_ids'] = text_encoding.pop('input_ids')
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
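# Minimal usage sketch (the checkpoint name and image file are illustrative,
# not taken from this file):
#   from PIL import Image
#   processor = Pix2StructProcessor.from_pretrained('google/pix2struct-textcaps-base')
#   inputs = processor(images=Image.open('photo.png'), text='A caption', return_tensors='pt')
#   # inputs contains flattened image patches plus decoder_* text fields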
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=384,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=128,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = 'temp_engine/bert-fp32.engine'
if args.fp16:
    engine_name = 'temp_engine/bert-fp16.engine'
if args.int8:
    engine_name = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs['input_ids'], dtype=np.int32)
    attention_mask = np.asarray(inputs['attention_mask'], dtype=np.int32)
    token_type_ids = np.asarray(inputs['token_type_ids'], dtype=np.int32)
    # Copy inputs to device
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle)
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    return outputs, infer_time
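# Note (editorial): the h_output buffers passed to model_infer must be
# page-locked (pinned) host memory for memcpy_dtoh_async to be truly
# asynchronous; they are allocated with cuda.pagelocked_empty further below.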
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets['validation'].column_names
question_column_name = 'question' if 'question' in column_names else column_names[0]
context_column_name = 'context' if 'context' in column_names else column_names[1]
answer_column_name = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some questions have lots of whitespace on the left, which would waste context budget
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation='only_second' if pad_on_right else 'only_first',
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding='max_length',
    )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop('overflow_to_sample_mapping')
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples['example_id'] = []
    for i in range(len(tokenized_examples['input_ids'])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples['example_id'].append(examples['id'][sample_index])
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples['offset_mapping'][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples['offset_mapping'][i])
        ]
    return tokenized_examples
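# Illustrative (assumed) example of the overflow mapping: if example 0 has a
# long context that is split into two features, `overflow_to_sample_mapping`
# starts as [0, 0, 1, ...] -- both features map back to example 0 and both
# inherit its `example_id`.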
eval_examples = raw_datasets['validation']
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='Running tokenizer on validation dataset',
)
data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage='eval'):
    # Post-processing: we match the start and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]
    references = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
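# Illustrative shape of the formatted output (values assumed, not real data):
#   predictions -> [{'id': '56be4db0...', 'prediction_text': 'Denver Broncos'}, ...]
#   references  -> [{'id': '56be4db0...', 'answers': {'text': [...], 'answer_start': [...]}}, ...]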
metric = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
# Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate page-locked output buffers (pinned host memory for async copies)
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(F' Num examples = {len(eval_dataset)}')
logger.info(F' Batch size = {args.per_device_eval_batch_size}')
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
    logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1000 / niter))
    logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1000))
    logger.info('Total Number of Inference = %d', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f'Evaluation metrics: {eval_metric}')
def multiplicative_persistence(num: int) -> int:
    """Return how many times the digits of `num` must be multiplied together
    before a single digit remains."""
    if not isinstance(num, int):
        raise ValueError('multiplicative_persistence() only accepts integral values')
    if num < 0:
        raise ValueError('multiplicative_persistence() does not accept negative values')
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Return how many times the digits of `num` must be summed together
    before a single digit remains."""
    if not isinstance(num, int):
        raise ValueError('additive_persistence() only accepts integral values')
    if num < 0:
        raise ValueError('additive_persistence() does not accept negative values')
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
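    # Illustrative checks (not in the original file):
    # 39 -> 3*9=27 -> 2*7=14 -> 1*4=4, so multiplicative persistence is 3
    assert multiplicative_persistence(39) == 3
    # 39 -> 3+9=12 -> 1+2=3, so additive persistence is 2
    assert additive_persistence(39) == 2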
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'shi-labs/dinat-mini-in1k-224': 'https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json',
    # See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = 'dinat'
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
    def __init__(self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16], kernel_size=7, dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]], mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act='gelu', initializer_range=0.02, layer_norm_eps=1e-5, layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
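# Minimal usage sketch (illustrative, using only the defaults defined above):
#   config = DinatConfig()
#   config.num_layers   # -> 4, one entry per stage in `depths`
#   config.hidden_size  # -> 512 == embed_dim * 2 ** (num_layers - 1)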
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """Decode audio bytes with ffmpeg into a float32 mono waveform."""
    ar = f'{sampling_rate}'
    ac = '1'
    format_for_conversion = 'f32le'
    ffmpeg_command = [
        'ffmpeg',
        '-i',
        'pipe:0',
        '-ac',
        ac,
        '-ar',
        ar,
        '-f',
        format_for_conversion,
        '-hide_banner',
        '-loglevel',
        'quiet',
        'pipe:1',
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to load audio files from filename') from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError('Malformed soundfile')
    return audio
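# Illustrative usage (the file name is an assumption):
#   with open('sample.flac', 'rb') as f:
#       waveform = ffmpeg_read(f.read(), sampling_rate=16_000)  # float32 mono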
def ffmpeg_microphone(sampling_rate: int, chunk_length_s: float, format_for_conversion: str = 'f32le'):
    """Stream raw microphone audio via an ffmpeg subprocess."""
    ar = f'{sampling_rate}'
    ac = '1'
    if format_for_conversion == 's16le':
        size_of_sample = 2
    elif format_for_conversion == 'f32le':
        size_of_sample = 4
    else:
        raise ValueError(f'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`')
    system = platform.system()
    if system == 'Linux':
        format_ = 'alsa'
        input_ = 'default'
    elif system == 'Darwin':
        format_ = 'avfoundation'
        input_ = ':0'
    elif system == 'Windows':
        format_ = 'dshow'
        input_ = 'default'
    ffmpeg_command = [
        'ffmpeg',
        '-f',
        format_,
        '-i',
        input_,
        '-ac',
        ac,
        '-ar',
        ar,
        '-f',
        format_for_conversion,
        '-fflags',
        'nobuffer',
        '-hide_banner',
        '-loglevel',
        'quiet',
        'pipe:1',
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(sampling_rate: int, chunk_length_s: float, stream_chunk_s: Optional[int] = None, stride_length_s: Optional[Union[Tuple[float, float], float]] = None, format_for_conversion: str = 'f32le'):
    """Stream microphone audio in overlapping chunks suitable for streaming inference."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == 's16le':
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == 'f32le':
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`')
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item['raw'] = np.frombuffer(item['raw'], dtype=dtype)
        item['stride'] = (
            item['stride'][0] // size_of_sample,
            item['stride'][1] // size_of_sample,
        )
        item['sampling_rate'] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Re-chunk a raw byte iterator into fixed-size chunks with left/right strides."""
    acc = b''
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f'Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}')
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {'raw': acc[:chunk_len], 'stride': stride, 'partial': True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {'raw': acc[:chunk_len], 'stride': stride}
                if stream:
                    item['partial'] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {'raw': acc, 'stride': (_stride_left, 0)}
        if stream:
            item['partial'] = False
        yield item
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Internal: read raw bytes from an ffmpeg subprocess in `buflen`-sized reads."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b'':
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to stream audio files from filename') from error
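# Illustrative behaviour of chunk_bytes_iter (values chosen for the example):
# with chunk_len=4 and stride=(1, 1), the stream b'abcdefgh' yields b'abcd',
# b'cdef', b'efgh' and finally the b'gh' remainder; successive full chunks
# overlap by stride_left + stride_right = 2 bytes.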
from manim import *
class __A ( lowerCAmelCase ):
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Dict = Rectangle(height=0.5 , width=0.5 )
lowerCAmelCase : Any = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCAmelCase : List[str] = Rectangle(height=0.25 , width=0.25 )
lowerCAmelCase : List[Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase : Tuple = [mem.copy() for i in range(6 )]
lowerCAmelCase : int = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : Dict = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : int = VGroup(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : str = Text('CPU' , font_size=24 )
lowerCAmelCase : Union[str, Any] = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCAmelCase_ )
lowerCAmelCase : int = [mem.copy() for i in range(4 )]
lowerCAmelCase : Union[str, Any] = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : int = Text('GPU' , font_size=24 )
lowerCAmelCase : Tuple = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
gpu.move_to([-1, -1, 0] )
self.add(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase : Tuple = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : List[str] = Text('Model' , font_size=24 )
lowerCAmelCase : Union[str, Any] = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
model.move_to([3, -1.0, 0] )
self.add(UpperCAmelCase_ )
lowerCAmelCase : Any = []
lowerCAmelCase : Dict = []
for i, rect in enumerate(UpperCAmelCase_ ):
lowerCAmelCase : Optional[Any] = fill.copy().set_fill(UpperCAmelCase_ , opacity=0.8 )
target.move_to(UpperCAmelCase_ )
model_arr.append(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(UpperCAmelCase_ )
self.add(*UpperCAmelCase_ , *UpperCAmelCase_ )
lowerCAmelCase : Dict = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase : Union[str, Any] = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase : Tuple = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : int = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : Tuple = VGroup(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : Union[str, Any] = Text('Disk' , font_size=24 )
lowerCAmelCase : Optional[Any] = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
disk.move_to([-4, -1.25, 0] )
self.add(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : List[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase : Optional[int] = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : Dict = MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(UpperCAmelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(UpperCAmelCase_ )
lowerCAmelCase : str = MarkupText(
f"Now watch as an input is passed through the model\nand how the memory is utilized and handled." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase_ ) )
lowerCAmelCase : Optional[Any] = Square(0.3 )
input.set_fill(UpperCAmelCase_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , UpperCAmelCase_ , buff=0.5 )
self.play(Write(UpperCAmelCase_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=UpperCAmelCase_ , buff=0.02 )
self.play(MoveToTarget(UpperCAmelCase_ ) )
self.play(FadeOut(UpperCAmelCase_ ) )
lowerCAmelCase : List[Any] = Arrow(start=UpperCAmelCase_ , end=UpperCAmelCase_ , color=UpperCAmelCase_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , UpperCAmelCase_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
lowerCAmelCase : int = MarkupText(
f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase_ , run_time=3 ) )
lowerCAmelCase : Optional[Any] = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(UpperCAmelCase_ ) , Circumscribe(model_arr[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(model_cpu_arr[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
lowerCAmelCase : Any = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , UpperCAmelCase_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
lowerCAmelCase : int = AnimationGroup(
FadeOut(UpperCAmelCase_ , run_time=0.5 ) , MoveToTarget(UpperCAmelCase_ , run_time=0.5 ) , FadeIn(UpperCAmelCase_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(UpperCAmelCase_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
lowerCAmelCase : List[str] = 0.7
self.play(
Circumscribe(model_arr[i] , **UpperCAmelCase_ ) , Circumscribe(cpu_left_col_base[i] , **UpperCAmelCase_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(model_arr[i + 1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(cpu_left_col_base[-1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
lowerCAmelCase : int = a_c
lowerCAmelCase : Union[str, Any] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(UpperCAmelCase_ ) , FadeOut(UpperCAmelCase_ , run_time=0.5 ) , )
lowerCAmelCase : int = MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase_ , run_time=3 ) , MoveToTarget(UpperCAmelCase_ ) )
self.wait()
from math import ceil
def solution(n: int = 1001) -> int:
    """Return the sum of the numbers on the diagonals of an n x n number spiral
    (Project Euler problem 28); n must be odd."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
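
# Illustrative check (not part of the original file): a 5x5 spiral has
# diagonals 1, 3, 5, 7, 9, 13, 17, 21, 25, which sum to 101 == solution(5).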
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_informer': [
        'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'InformerConfig',
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_informer'] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Scrape the citation count of a paper from a Google Scholar result page."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, 'html.parser')
    div = soup.find('div', attrs={'class': 'gs_ri'})
    anchors = div.find('div', attrs={'class': 'gs_fl'}).find_all('a')
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
'''title''': (
'''Precisely geometry controlled microsupercapacitors for ultrahigh areal '''
'''capacitance, volumetric capacitance, and energy density'''
),
'''journal''': '''Chem. Mater.''',
'''volume''': 30,
'''pages''': '''3979-3990''',
'''year''': 2018,
'''hl''': '''en''',
}
print(get_citation('''https://scholar.google.com/scholar_lookup''', params=params))
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='relu')
    )
    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation='relu'))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary'
    )
    test_set = test_datagen.flow_from_directory(
        'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary'
    )
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        'dataset/single_prediction/image.png', target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    # `predict` returns a sigmoid probability, so threshold at 0.5 instead of
    # comparing against exact 0/1 values
    if result[0][0] < 0.5:
        prediction = 'Normal'
    else:
        prediction = 'Abnormality detected'
| 323
| 0
|
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__A : List[Any] = trt.Logger(trt.Logger.WARNING)
__A : Optional[Any] = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__A : List[Any] = logging.getLogger(__name__)
__A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=384,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=128,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
__A : List[str] = parser.parse_args()
if args.tokenizer_name:
__A : List[Any] = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
__A : List[Any] = args.per_device_eval_batch_size
__A : Any = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__A : Any = True
__A : Union[str, Any] = '''temp_engine/bert-fp32.engine'''
if args.fpaa:
__A : List[str] = '''temp_engine/bert-fp16.engine'''
if args.inta:
__A : Dict = '''temp_engine/bert-int8.engine'''
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
__A : Optional[int] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__A : str = [network.get_input(i) for i in range(network.num_inputs)]
__A : Any = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__A : Dict = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__A : List[Any] = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__A : Union[str, Any] = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> Any:
'''simple docstring'''
lowerCAmelCase : Dict = np.asarray(inputs['input_ids'], dtype=np.intaa )
lowerCAmelCase : Optional[int] = np.asarray(inputs['attention_mask'], dtype=np.intaa )
lowerCAmelCase : Dict = np.asarray(inputs['token_type_ids'], dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), _UpperCAmelCase )
cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), _UpperCAmelCase )
cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), _UpperCAmelCase )
# start time
lowerCAmelCase : List[Any] = time.time()
# Run inference
context.execute_async(
bindings=[int(_UpperCAmelCase ) for d_inp in d_inputs] + [int(_UpperCAmelCase ), int(_UpperCAmelCase )], stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
cuda.memcpy_dtoh_async(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
# Synchronize the stream and take time
stream.synchronize()
# end time
lowerCAmelCase : List[str] = time.time()
lowerCAmelCase : Tuple = end_time - start_time
lowerCAmelCase : Union[str, Any] = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__A : List[str] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__A : Union[str, Any] = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
__A : int = raw_datasets['''validation'''].column_names
__A : int = '''question''' if '''question''' in column_names else column_names[0]
__A : List[str] = '''context''' if '''context''' in column_names else column_names[1]
__A : int = '''answers''' if '''answers''' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__A : str = tokenizer.padding_side == '''right'''
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'
)
__A : Union[str, Any] = min(args.max_seq_length, tokenizer.model_max_length)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Tuple:
'''simple docstring'''
lowerCAmelCase : Any = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possible giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
lowerCAmelCase : Union[str, Any] = tokenizer(
examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation='only_second' if pad_on_right else 'only_first', max_length=_UpperCAmelCase, stride=args.doc_stride, return_overflowing_tokens=_UpperCAmelCase, return_offsets_mapping=_UpperCAmelCase, padding='max_length', )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
lowerCAmelCase : List[Any] = tokenized_examples.pop('overflow_to_sample_mapping' )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
lowerCAmelCase : Tuple = []
for i in range(len(tokenized_examples['input_ids'] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
lowerCAmelCase : Optional[Any] = tokenized_examples.sequence_ids(_UpperCAmelCase )
lowerCAmelCase : Optional[int] = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
lowerCAmelCase : List[str] = sample_mapping[i]
tokenized_examples["example_id"].append(examples['id'][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
lowerCAmelCase : List[Any] = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples['offset_mapping'][i] )
]
return tokenized_examples
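# Note: with return_overflowing_tokens=True a single long example can yield several features.
# Illustrative numbers only (not taken from this run): a 1000-token context with
# max_seq_length=384 and doc_stride=128 is split into several overlapping windows, each tied
# back to its source example through the example_id recorded above.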
__A : int = raw_datasets['''validation''']
# Validation Feature Creation
__A : Any = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='''Running tokenizer on validation dataset''',
)
__A : List[str] = default_data_collator
__A : int = eval_dataset.remove_columns(['''example_id''', '''offset_mapping'''])
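# 'example_id' and 'offset_mapping' are needed only for post-processing, so they are stripped
# from the copy of the dataset fed to the model; the original eval_dataset keeps them for
# postprocess_qa_predictions.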
__A : Union[str, Any] = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase="eval" ) -> int:
'''simple docstring'''
lowerCAmelCase : str = postprocess_qa_predictions(
examples=_UpperCAmelCase, features=_UpperCAmelCase, predictions=_UpperCAmelCase, version_2_with_negative=args.version_2_with_negative, n_best_size=args.n_best_size, max_answer_length=args.max_answer_length, null_score_diff_threshold=args.null_score_diff_threshold, output_dir=args.output_dir, prefix=_UpperCAmelCase, )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
lowerCAmelCase : Union[str, Any] = [
{'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
]
else:
lowerCAmelCase : List[Any] = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]
lowerCAmelCase : Optional[Any] = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=_UpperCAmelCase, label_ids=_UpperCAmelCase )
__A : List[Any] = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
return trt.volume(engine.get_binding_shape(_UpperCAmelCase ) ) * engine.get_binding_dtype(_UpperCAmelCase ).itemsize
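    # The helper above returns a binding's buffer size in bytes: the number of elements in the
    # binding's shape times the element size. Purely illustrative example: a (1, 384) int32
    # binding needs 1 * 384 * 4 = 1536 bytes.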
# Allocate device memory for inputs and outputs.
__A : List[str] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
__A : Optional[int] = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
__A : int = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
__A : Tuple = cuda.mem_alloc(h_outputa.nbytes)
__A : Tuple = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__A : Union[str, Any] = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(F' Num examples = {len(eval_dataset)}')
logger.info(F' Batch size = {args.per_device_eval_batch_size}')
__A : Union[str, Any] = 0.0
__A : Optional[Any] = 0
__A : Optional[Any] = timeit.default_timer()
__A : Optional[int] = None
for step, batch in enumerate(eval_dataloader):
__A : str = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
__A : str = outputs
__A : Optional[Any] = torch.tensor(start_logits)
__A : Any = torch.tensor(end_logits)
        # necessary to pad predictions and labels so they can be gathered across processes
__A : List[Any] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
__A : int = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
__A : Union[str, Any] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__A : int = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
__A : str = nested_truncate(all_preds, len(eval_dataset))
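    # start/end logits were padded to a common length so accelerator.gather could stack them
    # across processes; nested_truncate then trims the gathered predictions back to
    # len(eval_dataset), dropping any samples a distributed sampler may have duplicated to
    # even out batch sizes.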
__A : Any = timeit.default_timer() - start_time
logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 1000 / niter))
logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 1000))
logger.info('''Total Number of Inference = %d''', niter)
__A : List[Any] = post_processing_function(eval_examples, eval_dataset, all_preds)
__A : str = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F'Evaluation metrics: {eval_metric}')
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__A : str = logging.getLogger(__name__)
class __A ( lowerCAmelCase ):
def lowercase__ ( self : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Any=None ):
lowerCAmelCase : List[Any] = self.layer[current_layer](UpperCAmelCase_ , UpperCAmelCase_ , head_mask[current_layer] )
lowerCAmelCase : Optional[Any] = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , lowerCAmelCase , )
class __A ( lowerCAmelCase ):
def __init__( self : Dict , UpperCAmelCase_ : Optional[int] ):
super().__init__(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = BertEncoderWithPabee(UpperCAmelCase_ )
self.init_weights()
lowerCAmelCase : str = 0
lowerCAmelCase : Optional[Any] = 0
lowerCAmelCase : str = 0
lowerCAmelCase : Dict = 0
def lowercase__ ( self : int , UpperCAmelCase_ : Any ):
lowerCAmelCase : int = threshold
def lowercase__ ( self : Tuple , UpperCAmelCase_ : Dict ):
lowerCAmelCase : Optional[Any] = patience
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Tuple = 0
lowerCAmelCase : Tuple = 0
def lowercase__ ( self : Dict ):
lowerCAmelCase : Optional[int] = self.inference_layers_num / self.inference_instances_num
lowerCAmelCase : List[Any] = (
f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
)
print(UpperCAmelCase_ )
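        # Example: with 12 hidden layers and an average of 6 layers actually used, the
        # reported speed-up is 1 - 6 / 12 = 0.50.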
@add_start_docstrings_to_model_forward(UpperCAmelCase_ )
def lowercase__ ( self : Tuple , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Tuple=False , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
lowerCAmelCase : Optional[int] = input_ids.size()
elif inputs_embeds is not None:
lowerCAmelCase : List[str] = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
lowerCAmelCase : Union[str, Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
lowerCAmelCase : Any = torch.ones(UpperCAmelCase_ , device=UpperCAmelCase_ )
if token_type_ids is None:
lowerCAmelCase : Union[str, Any] = torch.zeros(UpperCAmelCase_ , dtype=torch.long , device=UpperCAmelCase_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
lowerCAmelCase : torch.Tensor = self.get_extended_attention_mask(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[int] = encoder_hidden_states.size()
lowerCAmelCase : Optional[int] = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
lowerCAmelCase : Any = torch.ones(UpperCAmelCase_ , device=UpperCAmelCase_ )
lowerCAmelCase : Tuple = self.invert_attention_mask(UpperCAmelCase_ )
else:
lowerCAmelCase : List[Any] = None
# Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
lowerCAmelCase : Optional[Any] = self.get_head_mask(UpperCAmelCase_ , self.config.num_hidden_layers )
lowerCAmelCase : int = self.embeddings(
input_ids=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , inputs_embeds=UpperCAmelCase_ )
lowerCAmelCase : List[str] = embedding_output
if self.training:
lowerCAmelCase : Tuple = []
for i in range(self.config.num_hidden_layers ):
lowerCAmelCase : Dict = self.encoder.adaptive_forward(
UpperCAmelCase_ , current_layer=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ )
lowerCAmelCase : List[str] = self.pooler(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = output_layers[i](output_dropout(UpperCAmelCase_ ) )
res.append(UpperCAmelCase_ )
elif self.patience == 0: # Use all layers for inference
lowerCAmelCase : Union[str, Any] = self.encoder(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , encoder_attention_mask=UpperCAmelCase_ , )
lowerCAmelCase : Optional[Any] = self.pooler(encoder_outputs[0] )
lowerCAmelCase : List[Any] = [output_layers[self.config.num_hidden_layers - 1](UpperCAmelCase_ )]
else:
lowerCAmelCase : Tuple = 0
lowerCAmelCase : List[str] = None
lowerCAmelCase : Optional[Any] = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
lowerCAmelCase : Union[str, Any] = self.encoder.adaptive_forward(
UpperCAmelCase_ , current_layer=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = self.pooler(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = output_layers[i](UpperCAmelCase_ )
if regression:
lowerCAmelCase : List[str] = logits.detach()
if patient_result is not None:
lowerCAmelCase : List[Any] = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
lowerCAmelCase : Any = 0
else:
lowerCAmelCase : Union[str, Any] = logits.detach().argmax(dim=1 )
if patient_result is not None:
lowerCAmelCase : Optional[Any] = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(UpperCAmelCase_ ) ):
patient_counter += 1
else:
lowerCAmelCase : Tuple = 0
lowerCAmelCase : List[Any] = logits
if patient_counter == self.patience:
break
lowerCAmelCase : Dict = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
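        # Patience-based early exit (PABEE): at inference a classifier head runs after every
        # layer, and computation stops as soon as `patience` consecutive heads agree on the
        # prediction (or, for regression, change by less than regression_threshold).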
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , lowerCAmelCase , )
class __A ( lowerCAmelCase ):
def __init__( self : Tuple , UpperCAmelCase_ : Tuple ):
super().__init__(UpperCAmelCase_ )
lowerCAmelCase : Tuple = config.num_labels
lowerCAmelCase : int = BertModelWithPabee(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = nn.Dropout(config.hidden_dropout_prob )
lowerCAmelCase : List[Any] = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Any=None , ):
lowerCAmelCase : int = self.bert(
input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , head_mask=UpperCAmelCase_ , inputs_embeds=UpperCAmelCase_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
lowerCAmelCase : Any = (logits[-1],)
if labels is not None:
lowerCAmelCase : Tuple = None
lowerCAmelCase : Optional[int] = 0
for ix, logits_item in enumerate(UpperCAmelCase_ ):
if self.num_labels == 1:
# We are doing regression
lowerCAmelCase : Tuple = MSELoss()
lowerCAmelCase : Any = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
lowerCAmelCase : Tuple = CrossEntropyLoss()
lowerCAmelCase : Dict = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
lowerCAmelCase : Any = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
lowerCAmelCase : str = (total_loss / total_weights,) + outputs
return outputs
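        # The training loss is a weighted average over all intermediate heads, with head i
        # weighted by (i + 1) so that deeper heads count more; e.g. with 3 heads the weights
        # are 1/6, 2/6 and 3/6.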
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__A : Tuple = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
__A : int = 25_0004
__A : Tuple = 25_0020
@require_sentencepiece
@require_tokenizers
class __A ( lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : List[str] = MBartTokenizer
lowerCAmelCase_ : List[Any] = MBartTokenizerFast
lowerCAmelCase_ : int = True
lowerCAmelCase_ : Tuple = True
def lowercase__ ( self : int ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase : Any = MBartTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : Dict = MBartTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_ )
lowerCAmelCase : List[str] = tokenizer.tokenize('This is a test' )
self.assertListEqual(UpperCAmelCase_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
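        # MBart's vocabulary places fairseq's reserved control tokens before the raw
        # SentencePiece ids, so the expected ids are shifted by tokenizer.fairseq_offset.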
lowerCAmelCase : Dict = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
UpperCAmelCase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowerCAmelCase : Dict = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCAmelCase : Any = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def lowercase__ ( self : Tuple ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCAmelCase : str = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase : int = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ )
lowerCAmelCase : int = tempfile.mkdtemp()
lowerCAmelCase : Tuple = tokenizer_r.save_pretrained(UpperCAmelCase_ )
lowerCAmelCase : Tuple = tokenizer_p.save_pretrained(UpperCAmelCase_ )
                # Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
lowerCAmelCase : List[Any] = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(UpperCAmelCase_ , UpperCAmelCase_ )
# Checks everything loads correctly in the same way
lowerCAmelCase : List[str] = tokenizer_r.from_pretrained(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = tokenizer_p.from_pretrained(UpperCAmelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(UpperCAmelCase_ )
# Save tokenizer rust, legacy_format=True
lowerCAmelCase : List[Any] = tempfile.mkdtemp()
lowerCAmelCase : int = tokenizer_r.save_pretrained(UpperCAmelCase_ , legacy_format=UpperCAmelCase_ )
lowerCAmelCase : Dict = tokenizer_p.save_pretrained(UpperCAmelCase_ )
                # Checks it saves with the same files
self.assertSequenceEqual(UpperCAmelCase_ , UpperCAmelCase_ )
# Checks everything loads correctly in the same way
lowerCAmelCase : List[str] = tokenizer_r.from_pretrained(UpperCAmelCase_ )
lowerCAmelCase : List[str] = tokenizer_p.from_pretrained(UpperCAmelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
shutil.rmtree(UpperCAmelCase_ )
# Save tokenizer rust, legacy_format=False
lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
lowerCAmelCase : Union[str, Any] = tokenizer_r.save_pretrained(UpperCAmelCase_ , legacy_format=UpperCAmelCase_ )
lowerCAmelCase : Tuple = tokenizer_p.save_pretrained(UpperCAmelCase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCAmelCase : Optional[int] = tokenizer_r.from_pretrained(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = tokenizer_p.from_pretrained(UpperCAmelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
shutil.rmtree(UpperCAmelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
lowerCAmelCase_ : str = "facebook/mbart-large-en-ro"
lowerCAmelCase_ : List[Any] = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
lowerCAmelCase_ : Any = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
lowerCAmelCase_ : List[Any] = [8274, 12_7873, 2_5916, 7, 8622, 2071, 438, 6_7485, 53, 18_7895, 23, 5_1712, 2, EN_CODE]
@classmethod
def lowercase__ ( cls : int ):
lowerCAmelCase : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' )
lowerCAmelCase : int = 1
return cls
def lowercase__ ( self : str ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 250020 )
def lowercase__ ( self : str ):
lowerCAmelCase : str = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCAmelCase_ )
def lowercase__ ( self : int ):
        self.assertIn(RO_CODE , self.tokenizer.all_special_ids )
lowerCAmelCase : Optional[int] = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
lowerCAmelCase : List[str] = self.tokenizer.decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
lowerCAmelCase : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase_ )
def lowercase__ ( self : List[str] ):
lowerCAmelCase : int = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , UpperCAmelCase_ )
lowerCAmelCase : Any = 10
lowerCAmelCase : str = self.tokenizer(UpperCAmelCase_ , max_length=UpperCAmelCase_ , truncation=UpperCAmelCase_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , UpperCAmelCase_ )
self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ )
def lowercase__ ( self : Optional[Any] ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [250026, 250001] )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : Any = tempfile.mkdtemp()
lowerCAmelCase : Union[str, Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = MBartTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCAmelCase_ )
@require_torch
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase_ , return_tensors='pt' )
lowerCAmelCase : str = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def lowercase__ ( self : Any ):
lowerCAmelCase : Tuple = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
lowerCAmelCase : Optional[int] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
lowerCAmelCase : Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , UpperCAmelCase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
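        # MBart encodes source text as `tokens </s> src_lang_code`, which is why prefix_tokens
        # is empty and suffix_tokens is [eos, language code].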
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Optional[int] = self.tokenizer(self.src_text , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=3 , return_tensors='pt' )
lowerCAmelCase : Any = self.tokenizer(
text_target=self.tgt_text , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=10 , return_tensors='pt' )
lowerCAmelCase : List[Any] = targets['input_ids']
lowerCAmelCase : List[str] = shift_tokens_right(UpperCAmelCase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowercase__ ( self : str ):
lowerCAmelCase : Tuple = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(UpperCAmelCase_ ) , {
# A, test, EOS, en_XX
'input_ids': [[62, 3034, 2, 250004]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 250001,
} , )
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__A : List[Any] = logging.get_logger(__name__)
__A : List[Any] = {
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : Union[str, Any] = "deberta-v2"
def __init__( self : int , UpperCAmelCase_ : Dict=128100 , UpperCAmelCase_ : Optional[int]=1536 , UpperCAmelCase_ : Tuple=24 , UpperCAmelCase_ : Any=24 , UpperCAmelCase_ : Any=6144 , UpperCAmelCase_ : Tuple="gelu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : List[Any]=512 , UpperCAmelCase_ : List[str]=0 , UpperCAmelCase_ : List[Any]=0.02 , UpperCAmelCase_ : Optional[int]=1E-7 , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Dict=-1 , UpperCAmelCase_ : Tuple=0 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : int=None , UpperCAmelCase_ : List[Any]=0 , UpperCAmelCase_ : int="gelu" , **UpperCAmelCase_ : int , ):
super().__init__(**UpperCAmelCase_ )
lowerCAmelCase : Dict = hidden_size
lowerCAmelCase : List[Any] = num_hidden_layers
lowerCAmelCase : str = num_attention_heads
lowerCAmelCase : List[str] = intermediate_size
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : int = hidden_dropout_prob
lowerCAmelCase : int = attention_probs_dropout_prob
lowerCAmelCase : Any = max_position_embeddings
lowerCAmelCase : str = type_vocab_size
lowerCAmelCase : Union[str, Any] = initializer_range
lowerCAmelCase : Union[str, Any] = relative_attention
lowerCAmelCase : List[Any] = max_relative_positions
lowerCAmelCase : List[Any] = pad_token_id
lowerCAmelCase : Optional[Any] = position_biased_input
# Backwards compatibility
if type(UpperCAmelCase_ ) == str:
lowerCAmelCase : Tuple = [x.strip() for x in pos_att_type.lower().split('|' )]
lowerCAmelCase : str = pos_att_type
lowerCAmelCase : Dict = vocab_size
lowerCAmelCase : Optional[Any] = layer_norm_eps
lowerCAmelCase : str = kwargs.get('pooler_hidden_size' , UpperCAmelCase_ )
lowerCAmelCase : Tuple = pooler_dropout
lowerCAmelCase : Union[str, Any] = pooler_hidden_act
class __A ( lowerCAmelCase ):
@property
def lowercase__ ( self : Optional[Any] ):
if self.task == "multiple-choice":
lowerCAmelCase : Tuple = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase : Union[str, Any] = {0: 'batch', 1: 'sequence'}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] )
else:
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] )
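        # The dynamic axes above tell the ONNX exporter which input dimensions may vary at
        # runtime (batch, sequence and, for multiple choice, the choice dimension);
        # token_type_ids is exported only when the config actually uses segment embeddings.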
@property
def lowercase__ ( self : int ):
return 12
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional["TensorType"] = None , UpperCAmelCase_ : int = 3 , UpperCAmelCase_ : int = 40 , UpperCAmelCase_ : int = 40 , UpperCAmelCase_ : "PreTrainedTokenizerBase" = None , ):
lowerCAmelCase : List[str] = super().generate_dummy_inputs(preprocessor=UpperCAmelCase_ , framework=UpperCAmelCase_ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
"""simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
__A : List[Any] = 0
__A : List[Any] = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0s are free cells whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__A : Any = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
__A : int = tuple[int, int]
class __A :
def __init__( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Node | None , ):
lowerCAmelCase : str = pos_x
lowerCAmelCase : Tuple = pos_y
lowerCAmelCase : str = (pos_y, pos_x)
lowerCAmelCase : int = goal_x
lowerCAmelCase : Any = goal_y
lowerCAmelCase : Union[str, Any] = g_cost
lowerCAmelCase : int = parent
lowerCAmelCase : Dict = self.calculate_heuristic()
lowerCAmelCase : Dict = self.g_cost + self.h_cost
def lowercase__ ( self : Any ):
lowerCAmelCase : Dict = self.pos_x - self.goal_x
lowerCAmelCase : List[str] = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(UpperCAmelCase_ ) + abs(UpperCAmelCase_ )
else:
return sqrt(dy**2 + dx**2 )
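        # e.g. for dx = 3, dy = 4: Manhattan distance = 3 + 4 = 7, Euclidean distance = 5.0.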
def __lt__( self : Tuple , UpperCAmelCase_ : Node ):
return self.f_cost < other.f_cost
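        # open_nodes.sort() relies on this ordering, so pop(0) always expands the node with
        # the lowest f = g + h.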
class __A :
def __init__( self : int , UpperCAmelCase_ : TPosition , UpperCAmelCase_ : TPosition ):
lowerCAmelCase : Union[str, Any] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , UpperCAmelCase_ )
lowerCAmelCase : str = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , UpperCAmelCase_ )
lowerCAmelCase : Dict = [self.start]
lowerCAmelCase : list[Node] = []
lowerCAmelCase : str = False
def lowercase__ ( self : List[Any] ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
lowerCAmelCase : int = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(UpperCAmelCase_ )
self.closed_nodes.append(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = self.get_successors(UpperCAmelCase_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(UpperCAmelCase_ )
else:
# retrieve the best current path
lowerCAmelCase : str = self.open_nodes.pop(self.open_nodes.index(UpperCAmelCase_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(UpperCAmelCase_ )
else:
self.open_nodes.append(UpperCAmelCase_ )
return [self.start.pos]
def lowercase__ ( self : int , UpperCAmelCase_ : Node ):
lowerCAmelCase : str = []
for action in delta:
lowerCAmelCase : str = parent.pos_x + action[1]
lowerCAmelCase : Tuple = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(UpperCAmelCase_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
UpperCAmelCase_ , UpperCAmelCase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , UpperCAmelCase_ , ) )
return successors
def lowercase__ ( self : List[Any] , UpperCAmelCase_ : Node | None ):
lowerCAmelCase : Optional[int] = node
lowerCAmelCase : Tuple = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
lowerCAmelCase : List[Any] = current_node.parent
path.reverse()
return path
class __A :
def __init__( self : Dict , UpperCAmelCase_ : TPosition , UpperCAmelCase_ : TPosition ):
lowerCAmelCase : List[Any] = AStar(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : int = AStar(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = False
def lowercase__ ( self : Any ):
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
lowerCAmelCase : int = self.fwd_astar.open_nodes.pop(0 )
lowerCAmelCase : str = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
UpperCAmelCase_ , UpperCAmelCase_ )
self.fwd_astar.closed_nodes.append(UpperCAmelCase_ )
self.bwd_astar.closed_nodes.append(UpperCAmelCase_ )
lowerCAmelCase : Tuple = current_bwd_node
lowerCAmelCase : Any = current_fwd_node
lowerCAmelCase : Optional[Any] = {
self.fwd_astar: self.fwd_astar.get_successors(UpperCAmelCase_ ),
self.bwd_astar: self.bwd_astar.get_successors(UpperCAmelCase_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(UpperCAmelCase_ )
else:
# retrieve the best current path
lowerCAmelCase : Union[str, Any] = astar.open_nodes.pop(
astar.open_nodes.index(UpperCAmelCase_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(UpperCAmelCase_ )
else:
astar.open_nodes.append(UpperCAmelCase_ )
return [self.fwd_astar.start.pos]
def lowercase__ ( self : Optional[Any] , UpperCAmelCase_ : Node , UpperCAmelCase_ : Node ):
lowerCAmelCase : Optional[int] = self.fwd_astar.retrace_path(UpperCAmelCase_ )
lowerCAmelCase : List[str] = self.bwd_astar.retrace_path(UpperCAmelCase_ )
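        # The two half-paths meet at the same node; drop the duplicate meeting point from the
        # backward path, then reverse it so it runs from the meeting point to the goal.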
bwd_path.pop()
bwd_path.reverse()
lowerCAmelCase : List[str] = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
__A : Any = (0, 0)
__A : Optional[Any] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__A : int = time.time()
__A : Any = AStar(init, goal)
__A : List[str] = a_star.search()
__A : List[str] = time.time() - start_time
print(F'AStar execution time = {end_time:f} seconds')
__A : Optional[int] = time.time()
    __A : List[str] = BidirectionalAStar(init, goal)
    __A : List[str] = bd_astar.search()  # run the search so the timing below measures real work
    __A : Union[str, Any] = time.time() - bd_start_time
print(F'BidirectionalAStar execution time = {bd_end_time:f} seconds')
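# The arrays below appear to be precomputed timestep schedules (each strictly decreasing from
# 999 to 0); their exact provenance is not stated in this file.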
__A : Dict = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
__A : List[Any] = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
__A : Dict = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
__A : Optional[int] = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
__A : Optional[int] = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
__A : Tuple = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
__A : int = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
__A : Optional[Any] = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
from ..utils import DummyObject, requires_backends
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : List[str] = ["torch"]
def __init__( self : Tuple , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Optional[int] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : str , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Any , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ['torch'] )
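# Every class below follows the same dummy-backend pattern: it stands in for a torch-backed
# object and raises an informative error through requires_backends (on construction and on its
# classmethods) whenever torch is not installed.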
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Tuple = ["torch"]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Optional[int] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Dict = ["torch"]
def __init__( self : List[str] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Tuple ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : List[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : List[Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : List[Any] = ["torch"]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Dict ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Optional[Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Dict , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Any = ["torch"]
def __init__( self : Optional[int] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : str ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : List[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[str] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : List[str] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : int = ["torch"]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Tuple ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Union[str, Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[str] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Union[str, Any] = ["torch"]
def __init__( self : int , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : int , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[str] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : int ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Dict = ["torch"]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Dict ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Dict , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : List[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : List[Any] = ["torch"]
def __init__( self : Optional[int] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : List[str] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : str ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : int , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : List[str] = ["torch"]
def __init__( self : Dict , *UpperCAmelCase_ : int , **UpperCAmelCase_ : List[str] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Any , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Dict , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : List[Any] = ["torch"]
def __init__( self : int , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : str , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : int , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ['torch'] )
def SCREAMING_SNAKE_CASE__ ( *_UpperCAmelCase, **_UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(_UpperCAmelCase, ['torch'] )
def SCREAMING_SNAKE_CASE__ ( *_UpperCAmelCase, **_UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(_UpperCAmelCase, ['torch'] )
def SCREAMING_SNAKE_CASE__ ( *_UpperCAmelCase, **_UpperCAmelCase ) -> int:
'''simple docstring'''
requires_backends(_UpperCAmelCase, ['torch'] )
def SCREAMING_SNAKE_CASE__ ( *_UpperCAmelCase, **_UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(_UpperCAmelCase, ['torch'] )
def SCREAMING_SNAKE_CASE__ ( *_UpperCAmelCase, **_UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
requires_backends(_UpperCAmelCase, ['torch'] )
def SCREAMING_SNAKE_CASE__ ( *_UpperCAmelCase, **_UpperCAmelCase ) -> int:
'''simple docstring'''
requires_backends(_UpperCAmelCase, ['torch'] )
def SCREAMING_SNAKE_CASE__ ( *_UpperCAmelCase, **_UpperCAmelCase ) -> Tuple:
'''simple docstring'''
requires_backends(_UpperCAmelCase, ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : List[str] = ["torch"]
def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Optional[Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Any ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : str , *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : List[Any] = ["torch"]
def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : List[str] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Optional[int] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : int = ["torch"]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Union[str, Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Optional[Any] = ["torch"]
def __init__( self : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Tuple ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Tuple , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : int , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Any = ["torch"]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : str ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Optional[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : List[str] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Optional[Any] = ["torch"]
def __init__( self : Tuple , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Dict ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Union[str, Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : List[str] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : str ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Optional[Any] = ["torch"]
def __init__( self : str , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : str ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Dict , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Optional[int] = ["torch"]
def __init__( self : List[Any] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Dict ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Any , *UpperCAmelCase_ : int , **UpperCAmelCase_ : str ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Optional[Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : List[Any] = ["torch"]
def __init__( self : Optional[int] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Union[str, Any] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Any = ["torch"]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : str ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : List[Any] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Optional[int] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Tuple = ["torch"]
def __init__( self : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : str , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : str , *UpperCAmelCase_ : int , **UpperCAmelCase_ : str ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Any = ["torch"]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Any , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : str ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Union[str, Any] = ["torch"]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Optional[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Dict , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : List[str] = ["torch"]
def __init__( self : List[str] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : str ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Any , *UpperCAmelCase_ : int , **UpperCAmelCase_ : List[str] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Dict = ["torch"]
def __init__( self : str , *UpperCAmelCase_ : str , **UpperCAmelCase_ : str ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : List[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : List[str] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Optional[Any] = ["torch"]
def __init__( self : Optional[int] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[int] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Optional[int] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[str] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Optional[Any] = ["torch"]
def __init__( self : Optional[int] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Dict ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Union[str, Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Dict , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : int = ["torch"]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : int ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Any , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Union[str, Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : str ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Tuple = ["torch"]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Any , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Optional[int] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Any ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : str = ["torch"]
def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Dict , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : List[Any] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Dict = ["torch"]
def __init__( self : str , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : str ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : str , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Tuple = ["torch"]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Any , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : int , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : int = ["torch"]
def __init__( self : Dict , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[str] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Any , *UpperCAmelCase_ : str , **UpperCAmelCase_ : int ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Union[str, Any] = ["torch"]
def __init__( self : int , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Tuple ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Tuple , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : List[str] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : str ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : int = ["torch"]
def __init__( self : Optional[int] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : str , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : List[Any] = ["torch"]
def __init__( self : int , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Optional[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : List[Any] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Union[str, Any] = ["torch"]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Any ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : str , *UpperCAmelCase_ : str , **UpperCAmelCase_ : int ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Optional[Any] = ["torch"]
def __init__( self : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Dict , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[str] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : str , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : str ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : int = ["torch"]
def __init__( self : str , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Optional[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Union[str, Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Tuple , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : int ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : List[Any] = ["torch"]
def __init__( self : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[str] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : str ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : str = ["torch"]
def __init__( self : Any , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Dict ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Tuple , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Dict , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Tuple ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Any = ["torch"]
def __init__( self : Optional[int] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : List[str] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Optional[int] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : int ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Any , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Tuple = ["torch"]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : str ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Dict , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Any , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[str] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Optional[Any] = ["torch"]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Tuple , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Any , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : str ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Optional[int] = ["torch"]
def __init__( self : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : int ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Any , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : str , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Any ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : List[Any] = ["torch"]
def __init__( self : Dict , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Optional[int] ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Optional[Any] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Union[str, Any] = ["torch"]
def __init__( self : int , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Tuple ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Dict , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : str , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : str = ["torch"]
def __init__( self : List[Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Any ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : Optional[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[int] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : Dict , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Dict ):
requires_backends(cls , ['torch'] )
class __A ( metaclass=lowerCAmelCase ):
lowerCAmelCase_ : Optional[Any] = ["torch"]
def __init__( self : Optional[int] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Dict ):
requires_backends(self , ['torch'] )
@classmethod
def lowercase__ ( cls : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[Any] ):
requires_backends(cls , ['torch'] )
@classmethod
def lowercase__ ( cls : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Any ):
requires_backends(cls , ['torch'] )
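# The classes above all follow the "dummy object" pattern: every placeholder raises
# as soon as it is instantiated or loaded without the required backend, so import
# errors surface at use time instead of import time. A minimal, self-contained
# sketch of that guard (the helper below is an assumption, not the exact library
# internals):
import importlib.util


def requires_backends_sketch(obj, backends):
    """Raise ImportError if any backend in `backends` is not importable."""
    missing = [name for name in backends if importlib.util.find_spec(name) is None]
    if missing:
        raise ImportError(
            f"{type(obj).__name__} requires the following backends: {', '.join(missing)}"
        )


class DummyTorchModelSketch:
    # Placeholder that fails loudly when torch is unavailable.
    def __init__(self, *args, **kwargs):
        requires_backends_sketch(self, ["torch"])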
| 368
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__A : Optional[Any] = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
__A : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
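# The `_LazyModule` wiring above defers heavy submodule imports until an attribute
# is first accessed. A minimal standard-library sketch of the same idea (the class
# name and module layout here are illustrative, not the real transformers code):
import importlib


class LazyModuleSketch:
    def __init__(self, import_structure):
        # Maps attribute name -> submodule that defines it,
        # e.g. {"modeling_unispeech": ["UniSpeechModel"]} becomes
        # {"UniSpeechModel": "modeling_unispeech"}.
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, name):
        module_name = self._attr_to_module.get(name)
        if module_name is None:
            raise AttributeError(name)
        module = importlib.import_module(module_name)  # imported only on first access
        return getattr(module, name)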
| 323
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A : List[str] = {'''configuration_mbart''': ['''MBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MBartConfig''', '''MBartOnnxConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Dict = ['''MBartTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = ['''MBartTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Dict = [
'''MBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MBartForCausalLM''',
'''MBartForConditionalGeneration''',
'''MBartForQuestionAnswering''',
'''MBartForSequenceClassification''',
'''MBartModel''',
'''MBartPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Tuple = [
'''TFMBartForConditionalGeneration''',
'''TFMBartModel''',
'''TFMBartPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = [
'''FlaxMBartForConditionalGeneration''',
'''FlaxMBartForQuestionAnswering''',
'''FlaxMBartForSequenceClassification''',
'''FlaxMBartModel''',
'''FlaxMBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
__A : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
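# Each try/except block above probes one optional backend and only registers the
# matching symbols when the probe succeeds, which is why a missing backend silently
# shrinks the import structure instead of crashing. A hedged, standalone version of
# that probe (the helper below is an assumption, not the library's implementation):
import importlib.util


def backend_available(name: str) -> bool:
    return importlib.util.find_spec(name) is not None


import_structure_sketch = {}
if backend_available("torch"):
    import_structure_sketch["modeling_mbart"] = ["MBartModel"]
if backend_available("tensorflow"):
    import_structure_sketch["modeling_tf_mbart"] = ["TFMBartModel"]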
| 369
|
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class __A :
def __init__( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any]=13 , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : str=99 , UpperCAmelCase_ : List[str]=32 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Optional[Any]=37 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : Tuple=512 , UpperCAmelCase_ : Any=16 , UpperCAmelCase_ : Any=2 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : Optional[int]=3 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Optional[int]=None , ):
lowerCAmelCase : int = parent
lowerCAmelCase : Any = 13
lowerCAmelCase : Union[str, Any] = 7
lowerCAmelCase : List[Any] = True
lowerCAmelCase : List[str] = True
lowerCAmelCase : Tuple = True
lowerCAmelCase : Union[str, Any] = True
lowerCAmelCase : Tuple = 99
lowerCAmelCase : Optional[Any] = 32
lowerCAmelCase : List[str] = 2
lowerCAmelCase : str = 4
lowerCAmelCase : Optional[Any] = 37
lowerCAmelCase : List[Any] = 'gelu'
lowerCAmelCase : Any = 0.1
lowerCAmelCase : Any = 0.1
lowerCAmelCase : Optional[Any] = 512
lowerCAmelCase : Dict = 16
lowerCAmelCase : Optional[Any] = 2
lowerCAmelCase : Union[str, Any] = 0.02
lowerCAmelCase : Optional[int] = 3
lowerCAmelCase : List[str] = 4
lowerCAmelCase : Any = None
def lowercase__ ( self : List[str] ):
lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Any = None
if self.use_input_mask:
lowerCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : Dict = None
if self.use_token_type_ids:
lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase : List[str] = None
lowerCAmelCase : Any = None
lowerCAmelCase : Tuple = None
if self.use_labels:
lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase : int = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase : Tuple = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any ):
lowerCAmelCase : List[Any] = TFRoFormerModel(config=UpperCAmelCase_ )
lowerCAmelCase : str = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCAmelCase : str = [input_ids, input_mask]
lowerCAmelCase : Any = model(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict ):
lowerCAmelCase : str = True
lowerCAmelCase : List[str] = TFRoFormerForCausalLM(config=UpperCAmelCase_ )
lowerCAmelCase : List[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : List[str] = model(UpperCAmelCase_ )['logits']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def lowercase__ ( self : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any ):
lowerCAmelCase : Union[str, Any] = TFRoFormerForMaskedLM(config=UpperCAmelCase_ )
lowerCAmelCase : Tuple = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : Tuple = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] ):
lowerCAmelCase : str = self.num_labels
lowerCAmelCase : Optional[Any] = TFRoFormerForSequenceClassification(config=UpperCAmelCase_ )
lowerCAmelCase : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : Optional[int] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] ):
lowerCAmelCase : Dict = self.num_choices
lowerCAmelCase : str = TFRoFormerForMultipleChoice(config=UpperCAmelCase_ )
lowerCAmelCase : Tuple = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : Tuple = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : int = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : Union[str, Any] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple ):
lowerCAmelCase : List[Any] = self.num_labels
lowerCAmelCase : Any = TFRoFormerForTokenClassification(config=UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : Dict = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int ):
lowerCAmelCase : Optional[int] = TFRoFormerForQuestionAnswering(config=UpperCAmelCase_ )
lowerCAmelCase : Dict = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase : int = model(UpperCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Union[str, Any] = config_and_inputs
lowerCAmelCase : Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : List[str] = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ : Optional[Any] = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase_ : Optional[int] = False
lowerCAmelCase_ : int = False
def lowercase__ ( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def lowercase__ ( self : int ):
lowerCAmelCase : List[Any] = TFRoFormerModelTester(self )
lowerCAmelCase : Tuple = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def lowercase__ ( self : int ):
self.config_tester.run_common_tests()
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase_ )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ )
def lowercase__ ( self : str ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase_ )
def lowercase__ ( self : int ):
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
@slow
def lowercase__ ( self : Dict ):
lowerCAmelCase : str = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
self.assertIsNotNone(UpperCAmelCase_ )
@require_tf
class __A ( unittest.TestCase ):
@slow
def lowercase__ ( self : Any ):
lowerCAmelCase : Tuple = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
lowerCAmelCase : Dict = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ )[0]
# TODO Replace vocab size
lowerCAmelCase : Any = 50000
lowerCAmelCase : str = [1, 6, vocab_size]
self.assertEqual(output.shape , UpperCAmelCase_ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
lowerCAmelCase : Union[str, Any] = tf.constant(
[
[
[-0.12_05_33_41, -1.0_26_49_01, 0.29_22_19_46],
[-1.5_13_37_83, 0.19_74_33, 0.15_19_06_07],
[-5.0_13_54_03, -3.90_02_56, -0.84_03_87_64],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 )
@require_tf
class __A ( unittest.TestCase ):
lowerCAmelCase_ : Optional[int] = 1E-4
def lowercase__ ( self : Any ):
lowerCAmelCase : Optional[int] = tf.constant([[4, 10]] )
lowerCAmelCase : Tuple = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
lowerCAmelCase : int = emba(input_ids.shape )
lowerCAmelCase : str = tf.constant(
[[0.00_00, 0.00_00, 0.00_00, 1.00_00, 1.00_00, 1.00_00], [0.84_15, 0.04_64, 0.00_22, 0.54_03, 0.99_89, 1.00_00]] )
tf.debugging.assert_near(UpperCAmelCase_ , UpperCAmelCase_ , atol=self.tolerance )
def lowercase__ ( self : int ):
lowerCAmelCase : Dict = tf.constant(
[
[0.00_00, 0.00_00, 0.00_00, 0.00_00, 0.00_00],
[0.84_15, 0.82_19, 0.80_20, 0.78_19, 0.76_17],
[0.90_93, 0.93_64, 0.95_81, 0.97_49, 0.98_70],
] )
lowerCAmelCase : List[Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
lowerCAmelCase : List[Any] = emba.weight[:3, :5]
tf.debugging.assert_near(UpperCAmelCase_ , UpperCAmelCase_ , atol=self.tolerance )
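# The expected tensors above are the standard sinusoidal table: position p and
# channel i use sin(p / 10000^(2i/d)) in the first half of the channels and the
# matching cos in the second half. A minimal numpy sketch that reproduces the test
# values (the [sin | cos] layout is an assumption read off the expected tensor):
import numpy as np


def sinusoidal_table_sketch(num_positions: int, dim: int) -> np.ndarray:
    positions = np.arange(num_positions)[:, None]             # (P, 1)
    freqs = 1.0 / (10000 ** (2 * np.arange(dim // 2) / dim))  # (dim/2,)
    angles = positions * freqs                                # (P, dim/2)
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=-1)


# sinusoidal_table_sketch(2, 6)[1] ≈ [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]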
@require_tf
class __A ( unittest.TestCase ):
lowerCAmelCase_ : Optional[int] = 1E-4
def lowercase__ ( self : List[Any] ):
# 2,12,16,64
lowerCAmelCase : Optional[int] = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowerCAmelCase : List[str] = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowerCAmelCase : Optional[int] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
lowerCAmelCase : List[Any] = embed_positions([2, 16, 768] )[None, None, :, :]
lowerCAmelCase , lowerCAmelCase : Any = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = tf.constant(
[
[0.00_00, 0.01_00, 0.02_00, 0.03_00, 0.04_00, 0.05_00, 0.06_00, 0.07_00],
[-0.20_12, 0.88_97, 0.02_63, 0.94_01, 0.20_74, 0.94_63, 0.34_81, 0.93_43],
[-1.70_57, 0.62_71, -1.21_45, 1.38_97, -0.63_03, 1.76_47, -0.11_73, 1.89_85],
[-2.17_31, -1.63_97, -2.73_58, 0.28_54, -2.18_40, 1.71_83, -1.30_18, 2.48_71],
[0.27_17, -3.61_73, -2.92_06, -2.19_88, -3.66_38, 0.38_58, -2.91_55, 2.29_80],
[3.98_59, -2.15_80, -0.79_84, -4.49_04, -4.11_81, -2.02_52, -4.47_82, 1.12_53],
] )
lowerCAmelCase : Union[str, Any] = tf.constant(
[
[0.00_00, -0.01_00, -0.02_00, -0.03_00, -0.04_00, -0.05_00, -0.06_00, -0.07_00],
[0.20_12, -0.88_97, -0.02_63, -0.94_01, -0.20_74, -0.94_63, -0.34_81, -0.93_43],
[1.70_57, -0.62_71, 1.21_45, -1.38_97, 0.63_03, -1.76_47, 0.11_73, -1.89_85],
[2.17_31, 1.63_97, 2.73_58, -0.28_54, 2.18_40, -1.71_83, 1.30_18, -2.48_71],
[-0.27_17, 3.61_73, 2.92_06, 2.19_88, 3.66_38, -0.38_58, 2.91_55, -2.29_80],
[-3.98_59, 2.15_80, 0.79_84, 4.49_04, 4.11_81, 2.02_52, 4.47_82, -1.12_53],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , UpperCAmelCase_ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , UpperCAmelCase_ , atol=self.tolerance )
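# `apply_rotary_position_embeddings` mixes each (even, odd) channel pair with a
# position-dependent rotation: x_even' = x_even*cos - x_odd*sin and
# x_odd' = x_odd*cos + x_even*sin, which is what produces the mirrored query/key
# tensors checked above. A hedged numpy sketch of that rotation (the exact channel
# layout of the real TF implementation is an assumption here):
import numpy as np


def apply_rotary_sketch(x: np.ndarray, sin: np.ndarray, cos: np.ndarray) -> np.ndarray:
    # x: (..., seq_len, dim); sin/cos: (seq_len, dim // 2)
    x_even, x_odd = x[..., 0::2], x[..., 1::2]
    rotated = np.empty_like(x)
    rotated[..., 0::2] = x_even * cos - x_odd * sin
    rotated[..., 1::2] = x_odd * cos + x_even * sin
    return rotated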
| 323
| 0
|
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : List[Any] = logging.get_logger(__name__)
__A : Tuple = {
'''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''',
}
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : Union[str, Any] = "align_text_model"
def __init__( self : Dict , UpperCAmelCase_ : int=30522 , UpperCAmelCase_ : Any=768 , UpperCAmelCase_ : List[str]=12 , UpperCAmelCase_ : Union[str, Any]=12 , UpperCAmelCase_ : Any=3072 , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : Any=512 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : Tuple=1E-12 , UpperCAmelCase_ : Any=0 , UpperCAmelCase_ : Optional[Any]="absolute" , UpperCAmelCase_ : Dict=True , **UpperCAmelCase_ : List[Any] , ):
super().__init__(**UpperCAmelCase_ )
lowerCAmelCase : str = vocab_size
lowerCAmelCase : Optional[Any] = hidden_size
lowerCAmelCase : Any = num_hidden_layers
lowerCAmelCase : Dict = num_attention_heads
lowerCAmelCase : Union[str, Any] = hidden_act
lowerCAmelCase : Any = intermediate_size
lowerCAmelCase : Dict = hidden_dropout_prob
lowerCAmelCase : List[Any] = attention_probs_dropout_prob
lowerCAmelCase : str = max_position_embeddings
lowerCAmelCase : Tuple = type_vocab_size
lowerCAmelCase : Tuple = initializer_range
lowerCAmelCase : Optional[Any] = layer_norm_eps
lowerCAmelCase : Dict = position_embedding_type
lowerCAmelCase : Tuple = use_cache
lowerCAmelCase : int = pad_token_id
@classmethod
def lowercase__ ( cls : str , UpperCAmelCase_ : Union[str, os.PathLike] , **UpperCAmelCase_ : Optional[Any] ):
cls._set_token_in_kwargs(UpperCAmelCase_ )
lowerCAmelCase : Any = cls.get_config_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
# get the text config dict if we are loading from AlignConfig
if config_dict.get('model_type' ) == "align":
lowerCAmelCase : List[str] = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : int = "align_vision_model"
def __init__( self : Optional[int] , UpperCAmelCase_ : int = 3 , UpperCAmelCase_ : int = 600 , UpperCAmelCase_ : float = 2.0 , UpperCAmelCase_ : float = 3.1 , UpperCAmelCase_ : int = 8 , UpperCAmelCase_ : List[int] = [3, 3, 5, 3, 5, 5, 3] , UpperCAmelCase_ : List[int] = [32, 16, 24, 40, 80, 112, 192] , UpperCAmelCase_ : List[int] = [16, 24, 40, 80, 112, 192, 320] , UpperCAmelCase_ : List[int] = [] , UpperCAmelCase_ : List[int] = [1, 2, 2, 2, 1, 2, 1] , UpperCAmelCase_ : List[int] = [1, 2, 2, 3, 3, 4, 1] , UpperCAmelCase_ : List[int] = [1, 6, 6, 6, 6, 6, 6] , UpperCAmelCase_ : float = 0.25 , UpperCAmelCase_ : str = "swish" , UpperCAmelCase_ : int = 2560 , UpperCAmelCase_ : str = "mean" , UpperCAmelCase_ : float = 0.02 , UpperCAmelCase_ : float = 0.0_01 , UpperCAmelCase_ : float = 0.99 , UpperCAmelCase_ : float = 0.2 , **UpperCAmelCase_ : Optional[int] , ):
super().__init__(**UpperCAmelCase_ )
lowerCAmelCase : str = num_channels
lowerCAmelCase : Optional[Any] = image_size
lowerCAmelCase : Any = width_coefficient
lowerCAmelCase : Optional[Any] = depth_coefficient
lowerCAmelCase : List[str] = depth_divisor
lowerCAmelCase : Tuple = kernel_sizes
lowerCAmelCase : Any = in_channels
lowerCAmelCase : List[Any] = out_channels
lowerCAmelCase : Union[str, Any] = depthwise_padding
lowerCAmelCase : List[Any] = strides
lowerCAmelCase : Tuple = num_block_repeats
lowerCAmelCase : Optional[Any] = expand_ratios
lowerCAmelCase : int = squeeze_expansion_ratio
lowerCAmelCase : List[str] = hidden_act
lowerCAmelCase : Tuple = hidden_dim
lowerCAmelCase : Tuple = pooling_type
lowerCAmelCase : List[Any] = initializer_range
lowerCAmelCase : Union[str, Any] = batch_norm_eps
lowerCAmelCase : List[Any] = batch_norm_momentum
lowerCAmelCase : int = drop_connect_rate
lowerCAmelCase : Union[str, Any] = sum(UpperCAmelCase_ ) * 4
@classmethod
def lowercase__ ( cls : int , UpperCAmelCase_ : Union[str, os.PathLike] , **UpperCAmelCase_ : Any ):
cls._set_token_in_kwargs(UpperCAmelCase_ )
lowerCAmelCase : Any = cls.get_config_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get('model_type' ) == "align":
lowerCAmelCase : Any = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : int = "align"
lowerCAmelCase_ : str = True
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Any=640 , UpperCAmelCase_ : Optional[int]=1.0 , UpperCAmelCase_ : Optional[int]=0.02 , **UpperCAmelCase_ : str , ):
super().__init__(**UpperCAmelCase_ )
if text_config is None:
lowerCAmelCase : Tuple = {}
logger.info('text_config is None. Initializing the AlignTextConfig with default values.' )
if vision_config is None:
lowerCAmelCase : List[str] = {}
logger.info('vision_config is None. Initializing the AlignVisionConfig with default values.' )
lowerCAmelCase : int = AlignTextConfig(**UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = AlignVisionConfig(**UpperCAmelCase_ )
lowerCAmelCase : Tuple = projection_dim
lowerCAmelCase : Optional[Any] = temperature_init_value
lowerCAmelCase : int = initializer_range
@classmethod
def lowercase__ ( cls : List[Any] , UpperCAmelCase_ : AlignTextConfig , UpperCAmelCase_ : AlignVisionConfig , **UpperCAmelCase_ : Dict ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCAmelCase_ )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : List[Any] = copy.deepcopy(self.__dict__ )
lowerCAmelCase : Tuple = self.text_config.to_dict()
lowerCAmelCase : Union[str, Any] = self.vision_config.to_dict()
lowerCAmelCase : Any = self.__class__.model_type
return output
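# The composite config above nests one sub-config per modality and serializes both
# in `to_dict`. A minimal dataclass sketch of the same composition pattern (the
# names below are illustrative, not the transformers API):
from dataclasses import dataclass, field, asdict


@dataclass
class TextConfigSketch:
    vocab_size: int = 30522
    hidden_size: int = 768


@dataclass
class VisionConfigSketch:
    image_size: int = 600
    num_channels: int = 3


@dataclass
class CompositeConfigSketch:
    text_config: TextConfigSketch = field(default_factory=TextConfigSketch)
    vision_config: VisionConfigSketch = field(default_factory=VisionConfigSketch)
    projection_dim: int = 640

    def to_dict(self) -> dict:
        return asdict(self)  # recursively serializes the nested configs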
| 370
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __A ( unittest.TestCase ):
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : Tuple = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase : List[Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
lowerCAmelCase : str = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
lowerCAmelCase : Optional[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
lowerCAmelCase : Tuple = {'unk_token': '<unk>'}
lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(UpperCAmelCase_ ) )
lowerCAmelCase : Dict = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , UpperCAmelCase_ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : Any , **UpperCAmelCase_ : Dict ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowercase__ ( self : Tuple , **UpperCAmelCase_ : str ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowercase__ ( self : Optional[int] , **UpperCAmelCase_ : Optional[int] ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self : List[str] ):
lowerCAmelCase : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase : List[Any] = [Image.fromarray(np.moveaxis(UpperCAmelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase__ ( self : Any ):
lowerCAmelCase : List[str] = self.get_tokenizer()
lowerCAmelCase : List[str] = self.get_rust_tokenizer()
lowerCAmelCase : Optional[int] = self.get_image_processor()
lowerCAmelCase : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
lowerCAmelCase : int = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
lowerCAmelCase : Dict = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase_ )
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase_ )
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase_ )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Any = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase : Tuple = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowerCAmelCase : Union[str, Any] = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0 )
lowerCAmelCase : Dict = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase_ )
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Any = self.get_image_processor()
lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
lowerCAmelCase : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : Dict = self.prepare_image_inputs()
lowerCAmelCase : List[str] = image_processor(UpperCAmelCase_ , return_tensors='np' )
lowerCAmelCase : int = processor(images=UpperCAmelCase_ , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Union[str, Any] = self.get_image_processor()
lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
lowerCAmelCase : Dict = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = 'lower newer'
lowerCAmelCase : List[str] = processor(text=UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = tokenizer(UpperCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : Tuple = self.get_image_processor()
lowerCAmelCase : Dict = self.get_tokenizer()
lowerCAmelCase : List[str] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = 'lower newer'
lowerCAmelCase : Optional[int] = self.prepare_image_inputs()
lowerCAmelCase : Union[str, Any] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_ ):
processor()
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Optional[Any] = self.get_image_processor()
lowerCAmelCase : str = self.get_tokenizer()
lowerCAmelCase : Union[str, Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase : Any = processor.batch_decode(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = tokenizer.batch_decode(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : List[Any] = self.get_image_processor()
lowerCAmelCase : Dict = self.get_tokenizer()
lowerCAmelCase : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
lowerCAmelCase : Dict = 'lower newer'
lowerCAmelCase : Tuple = self.prepare_image_inputs()
lowerCAmelCase : List[str] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
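# The tests above exercise the processor contract: text is routed to the tokenizer,
# images to the image processor, and calling with neither raises. A hedged sketch
# of that dispatch (a simplification of the real CLIPProcessor, not its exact code):
class ProcessorSketch:
    def __init__(self, tokenizer, image_processor):
        self.tokenizer = tokenizer
        self.image_processor = image_processor

    def __call__(self, text=None, images=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images.")
        encoding = {}
        if text is not None:
            encoding.update(self.tokenizer(text, **kwargs))
        if images is not None:
            encoding.update(self.image_processor(images, **kwargs))
        return encoding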
| 323
| 0
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class __A ( unittest.TestCase ):
def lowercase__ ( self : str ):
lowerCAmelCase : Optional[int] = tempfile.mkdtemp()
lowerCAmelCase : str = BlipImageProcessor()
lowerCAmelCase : int = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' )
lowerCAmelCase : Dict = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert' )
lowerCAmelCase : int = InstructBlipProcessor(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
def lowercase__ ( self : Optional[int] , **UpperCAmelCase_ : Dict ):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_ ).tokenizer
def lowercase__ ( self : Optional[int] , **UpperCAmelCase_ : List[str] ):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_ ).image_processor
def lowercase__ ( self : List[str] , **UpperCAmelCase_ : Optional[Any] ):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_ ).qformer_tokenizer
def lowercase__ ( self : Optional[Any] ):
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self : int ):
lowerCAmelCase : Optional[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase : Dict = [Image.fromarray(np.moveaxis(UpperCAmelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : int = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase : Union[str, Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowerCAmelCase : Tuple = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0 )
lowerCAmelCase : List[Any] = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase_ )
self.assertIsInstance(processor.qformer_tokenizer , UpperCAmelCase_ )
def lowercase__ ( self : str ):
lowerCAmelCase : int = self.get_image_processor()
lowerCAmelCase : int = self.get_tokenizer()
lowerCAmelCase : Tuple = self.get_qformer_tokenizer()
lowerCAmelCase : Optional[int] = InstructBlipProcessor(
tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ , qformer_tokenizer=UpperCAmelCase_ )
lowerCAmelCase : int = self.prepare_image_inputs()
lowerCAmelCase : Optional[int] = image_processor(UpperCAmelCase_ , return_tensors='np' )
lowerCAmelCase : Dict = processor(images=UpperCAmelCase_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : int = self.get_image_processor()
lowerCAmelCase : Dict = self.get_tokenizer()
lowerCAmelCase : Any = self.get_qformer_tokenizer()
lowerCAmelCase : int = InstructBlipProcessor(
tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ , qformer_tokenizer=UpperCAmelCase_ )
lowerCAmelCase : List[str] = 'lower newer'
lowerCAmelCase : int = processor(text=UpperCAmelCase_ )
lowerCAmelCase : Dict = tokenizer(UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = qformer_tokenizer(UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['qformer_' + key] )
def lowercase__ ( self : int ):
lowerCAmelCase : Optional[Any] = self.get_image_processor()
lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
lowerCAmelCase : List[Any] = self.get_qformer_tokenizer()
lowerCAmelCase : Union[str, Any] = InstructBlipProcessor(
tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ , qformer_tokenizer=UpperCAmelCase_ )
lowerCAmelCase : Dict = 'lower newer'
lowerCAmelCase : Optional[Any] = self.prepare_image_inputs()
lowerCAmelCase : List[str] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_ )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_ ):
processor()
def lowercase__ ( self : Any ):
lowerCAmelCase : int = self.get_image_processor()
lowerCAmelCase : Optional[Any] = self.get_tokenizer()
lowerCAmelCase : List[Any] = self.get_qformer_tokenizer()
lowerCAmelCase : List[Any] = InstructBlipProcessor(
tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ , qformer_tokenizer=UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase : str = processor.batch_decode(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = tokenizer.batch_decode(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : List[str] ):
lowerCAmelCase : int = self.get_image_processor()
lowerCAmelCase : List[Any] = self.get_tokenizer()
lowerCAmelCase : Any = self.get_qformer_tokenizer()
lowerCAmelCase : Tuple = InstructBlipProcessor(
tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_ , qformer_tokenizer=UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = 'lower newer'
lowerCAmelCase : Dict = self.prepare_image_inputs()
lowerCAmelCase : Optional[int] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_ )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
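# The key checks above show how this processor merges two tokenizers' outputs:
# Q-Former fields are re-keyed with a "qformer_" prefix so they can sit next to the
# language-model fields in one dict. A hedged sketch of that merge (not the exact
# InstructBlipProcessor implementation):
def merge_dual_tokenizer_outputs_sketch(lm_encoding: dict, qformer_encoding: dict) -> dict:
    merged = dict(lm_encoding)  # keeps input_ids / attention_mask as-is
    for key, value in qformer_encoding.items():
        merged[f"qformer_{key}"] = value
    return merged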
| 371
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A : List[Any] = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = ['''XLMRobertaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = ['''XLMRobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Dict = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
__A : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 323
| 0
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
__A : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
__A : List[str] = '''
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
'''
@dataclass
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : Union[PIL.Image.Image, np.ndarray]
class __A ( lowerCAmelCase ):
def __init__( self : Tuple , UpperCAmelCase_ : PriorTransformer , UpperCAmelCase_ : CLIPVisionModel , UpperCAmelCase_ : CLIPImageProcessor , UpperCAmelCase_ : HeunDiscreteScheduler , UpperCAmelCase_ : ShapERenderer , ):
super().__init__()
self.register_modules(
prior=UpperCAmelCase_ , image_encoder=UpperCAmelCase_ , image_processor=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , renderer=UpperCAmelCase_ , )
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict ):
if latents is None:
lowerCAmelCase : Tuple = randn_tensor(UpperCAmelCase_ , generator=UpperCAmelCase_ , device=UpperCAmelCase_ , dtype=UpperCAmelCase_ )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
lowerCAmelCase : Dict = latents.to(UpperCAmelCase_ )
lowerCAmelCase : int = latents * scheduler.init_noise_sigma
return latents
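# `prepare_latents` above either samples fresh Gaussian noise or validates a
# user-supplied tensor, then multiplies by the scheduler's `init_noise_sigma` so
# the starting sample sits at the noise level the scheduler expects at its first
# timestep. A hedged standalone torch sketch of the same logic (function name is
# illustrative):
import torch


def prepare_latents_sketch(shape, scheduler, generator=None, latents=None):
    if latents is None:
        latents = torch.randn(shape, generator=generator)
    elif latents.shape != shape:
        raise ValueError(f"Unexpected latents shape {latents.shape}, expected {shape}")
    return latents * scheduler.init_noise_sigma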
def lowercase__ ( self : int , UpperCAmelCase_ : Tuple=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowerCAmelCase : Union[str, Any] = torch.device(f"cuda:{gpu_id}" )
lowerCAmelCase : Optional[Any] = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCAmelCase_ , UpperCAmelCase_ )
@property
def lowercase__ ( self : Optional[int] ):
if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(UpperCAmelCase_ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
    def _encode_image( self , image , device , num_images_per_prompt , do_classifier_free_guidance , ):
        if isinstance(image , list ) and isinstance(image[0] , torch.Tensor ):
            image = torch.cat(image , axis=0 ) if image[0].ndim == 4 else torch.stack(image , axis=0 )
        if not isinstance(image , torch.Tensor ):
            image = self.image_processor(image , return_tensors='pt' ).pixel_values[0].unsqueeze(0 )
        image = image.to(dtype=self.image_encoder.dtype , device=device )
        image_embeds = self.image_encoder(image )['last_hidden_state']
        image_embeds = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds] )
        return image_embeds
@torch.no_grad()
@replace_example_docstring(UpperCAmelCase_ )
    def __call__( self , image: Union[PIL.Image.Image, List[PIL.Image.Image]] , num_images_per_prompt: int = 1 , num_inference_steps: int = 25 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , latents: Optional[torch.FloatTensor] = None , guidance_scale: float = 4.0 , frame_size: int = 64 , output_type: Optional[str] = "pil" , return_dict: bool = True , ):
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        elif isinstance(image , list ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
            batch_size = len(image )
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image )}" )
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image , device , num_images_per_prompt , do_classifier_free_guidance )
        # prior
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps = self.scheduler.timesteps
        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim
        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , device , generator , latents , self.scheduler , )
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0] , num_embeddings , embedding_dim )
        for i, t in enumerate(self.progress_bar(timesteps ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input , t )
            noise_pred = self.prior(
                scaled_model_input , timestep=t , proj_embedding=image_embeds , ).predicted_image_embedding
            # remove the variance
            noise_pred , _ = noise_pred.split(
                scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
                noise_pred_uncond , noise_pred = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
            latents = self.scheduler.step(
                noise_pred , timestep=t , sample=latents , ).prev_sample
        if output_type == "latent":
            return ShapEPipelineOutput(images=latents )
        images = []
        for i, latent in enumerate(latents ):
            image = self.renderer.decode(
                latent[None, :] , device , size=frame_size , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
            images.append(image )
        images = torch.stack(images )
        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}" )
        images = images.cpu().numpy()
        if output_type == "pil":
            images = [self.numpy_to_pil(image ) for image in images]
        # Offload last model to CPU
        if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
            self.final_offload_hook.offload()
        if not return_dict:
            return (images,)
        return ShapEPipelineOutput(images=images )
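# Editor's sketch (not part of the original pipeline): the denoising loop above
# applies classifier-free guidance by running the prior on a doubled batch and
# recombining the unconditional and conditional predictions. The helper name
# below is hypothetical; the logic mirrors the loop body.
def _classifier_free_guidance_sketch(noise_pred, guidance_scale):
    # `noise_pred` stacks the unconditional and conditional halves on dim 0.
    noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
    return noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)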
| 350
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fastaa_timesteps,
    smartaa_timesteps,
    smartaaa_timesteps,
    superaa_timesteps,
    superaaa_timesteps,
)
@dataclass
class IFPipelineOutput( BaseOutput ):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 323
| 0
|
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    '''simple docstring'''
    if num <= 0:
        raise ValueError(f"{num}: Invalid input, please enter a positive integer." )
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num ) )
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start )
            # Set multiples of start to False
            for i in range(start * start, num + 1, start ):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1, num + 1 ):
        if sieve[j] is True:
            prime.append(j )
    return prime
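def _prime_sieve_sanity_check() -> None:
    # Editor's addition (hypothetical helper, not in the original file):
    # verifies the sieve against the known primes below 30.
    assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]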
if __name__ == "__main__":
print(prime_sieve(int(input('''Enter a positive integer: ''').strip())))
| 351
|
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    '''simple docstring'''
    return x + 2
class __A ( unittest.TestCase ):
def lowercase__ ( self : int ):
lowerCAmelCase : List[str] = 'x = 3'
lowerCAmelCase : Optional[Any] = {}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
assert result == 3
self.assertDictEqual(UpperCAmelCase_ , {'x': 3} )
lowerCAmelCase : Dict = 'x = y'
lowerCAmelCase : List[Any] = {'y': 5}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 5, 'y': 5} )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : Any = 'y = add_two(x)'
lowerCAmelCase : int = {'x': 3}
lowerCAmelCase : Optional[int] = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 5} )
# Won't work without the tool
with CaptureStdout() as out:
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
assert result is None
assert "tried to execute add_two" in out.out
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Tuple = 'x = 3'
lowerCAmelCase : List[Any] = {}
lowerCAmelCase : Dict = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
assert result == 3
self.assertDictEqual(UpperCAmelCase_ , {'x': 3} )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : List[Any] = 'test_dict = {\'x\': x, \'y\': add_two(x)}'
lowerCAmelCase : Dict = {'x': 3}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 5} )
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'test_dict': {'x': 3, 'y': 5}} )
def lowercase__ ( self : Any ):
lowerCAmelCase : Union[str, Any] = 'x = 3\ny = 5'
lowerCAmelCase : str = {}
lowerCAmelCase : Optional[int] = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 5} )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Union[str, Any] = 'text = f\'This is x: {x}.\''
lowerCAmelCase : str = {'x': 3}
lowerCAmelCase : int = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'text': 'This is x: 3.'} )
def lowercase__ ( self : Dict ):
lowerCAmelCase : Optional[Any] = 'if x <= 3:\n y = 2\nelse:\n y = 5'
lowerCAmelCase : Dict = {'x': 3}
lowerCAmelCase : int = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 2} )
lowerCAmelCase : Any = {'x': 8}
lowerCAmelCase : Optional[int] = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 8, 'y': 5} )
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : int = 'test_list = [x, add_two(x)]'
lowerCAmelCase : Optional[Any] = {'x': 3}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , [3, 5] )
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'test_list': [3, 5]} )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : int = 'y = x'
lowerCAmelCase : Optional[int] = {'x': 3}
lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ )
assert result == 3
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 3} )
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Dict = 'test_list = [x, add_two(x)]\ntest_list[1]'
lowerCAmelCase : List[str] = {'x': 3}
lowerCAmelCase : List[str] = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'test_list': [3, 5]} )
lowerCAmelCase : Optional[Any] = 'test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'
lowerCAmelCase : List[Any] = {'x': 3}
lowerCAmelCase : Optional[Any] = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ )
assert result == 5
self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'test_dict': {'x': 3, 'y': 5}} )
def lowercase__ ( self : int ):
lowerCAmelCase : Any = 'x = 0\nfor i in range(3):\n x = i'
lowerCAmelCase : str = {}
lowerCAmelCase : Dict = evaluate(UpperCAmelCase_ , {'range': range} , state=UpperCAmelCase_ )
assert result == 2
self.assertDictEqual(UpperCAmelCase_ , {'x': 2, 'i': 2} )
| 323
| 0
|
"""simple docstring"""
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    '''simple docstring'''
    return sum(c * (x**i) for i, c in enumerate(poly ) )
def horner(poly: Sequence[float], x: float) -> float:
    '''simple docstring'''
    result = 0.0
    for coeff in reversed(poly ):
        result = result * x + coeff
    return result
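def _horner_equivalence_check() -> None:
    # Editor's addition (hypothetical helper): Horner's rule rewrites
    # c0 + c1*x + c2*x**2 + ... as nested multiply-adds, so both evaluators
    # above must agree on any input.
    coeffs = (0.0, 0.0, 5.0, 9.3, 7.0)
    assert abs(evaluate_poly(coeffs, 10.0) - horner(coeffs, 10.0)) < 1e-9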
if __name__ == "__main__":
__A : Optional[int] = (0.0, 0.0, 5.0, 9.3, 7.0)
__A : str = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 352
|
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    '''simple docstring'''
    if side_length < 0:
        raise ValueError('surface_area_cube() only accepts non-negative values' )
    return 6 * side_length**2
def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    '''simple docstring'''
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError('surface_area_cuboid() only accepts non-negative values' )
    return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere(radius: float) -> float:
    '''simple docstring'''
    if radius < 0:
        raise ValueError('surface_area_sphere() only accepts non-negative values' )
    return 4 * pi * radius**2
def surface_area_hemisphere(radius: float) -> float:
    '''simple docstring'''
    if radius < 0:
        raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
    return 3 * pi * radius**2
def surface_area_cone(radius: float, height: float) -> float:
    '''simple docstring'''
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cone() only accepts non-negative values' )
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    '''simple docstring'''
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            'surface_area_conical_frustum() only accepts non-negative values' )
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def surface_area_cylinder(radius: float, height: float) -> float:
    '''simple docstring'''
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cylinder() only accepts non-negative values' )
    return 2 * pi * radius * (height + radius)
def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    '''simple docstring'''
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError('surface_area_torus() only accepts non-negative values' )
    if torus_radius < tube_radius:
        raise ValueError(
            'surface_area_torus() does not support spindle or self intersecting tori' )
    return 4 * pow(pi, 2 ) * torus_radius * tube_radius
def area_rectangle(length: float, width: float) -> float:
    '''simple docstring'''
    if length < 0 or width < 0:
        raise ValueError('area_rectangle() only accepts non-negative values' )
    return length * width
def area_square(side_length: float) -> float:
    '''simple docstring'''
    if side_length < 0:
        raise ValueError('area_square() only accepts non-negative values' )
    return side_length**2
def area_triangle(base: float, height: float) -> float:
    '''simple docstring'''
    if base < 0 or height < 0:
        raise ValueError('area_triangle() only accepts non-negative values' )
    return (base * height) / 2
def area_triangle_three_sides(side_1: float, side_2: float, side_3: float) -> float:
    '''simple docstring'''
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError('Given three sides do not form a triangle' )
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3) )
    return area
def area_parallelogram(base: float, height: float) -> float:
    '''simple docstring'''
    if base < 0 or height < 0:
        raise ValueError('area_parallelogram() only accepts non-negative values' )
    return base * height
def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    '''simple docstring'''
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError('area_trapezium() only accepts non-negative values' )
    return 1 / 2 * (base_1 + base_2) * height
def area_circle(radius: float) -> float:
    '''simple docstring'''
    if radius < 0:
        raise ValueError('area_circle() only accepts non-negative values' )
    return pi * radius**2
def area_ellipse(radius_x: float, radius_y: float) -> float:
    '''simple docstring'''
    if radius_x < 0 or radius_y < 0:
        raise ValueError('area_ellipse() only accepts non-negative values' )
    return pi * radius_x * radius_y
def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    '''simple docstring'''
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('area_rhombus() only accepts non-negative values' )
    return 1 / 2 * diagonal_1 * diagonal_2
def area_reg_polygon(sides: int, length: float) -> float:
    '''simple docstring'''
    if not isinstance(sides, int ) or sides < 3:
        raise ValueError(
            'area_reg_polygon() only accepts integers greater than or '
            'equal to three as number of sides' )
    elif length < 0:
        raise ValueError(
            'area_reg_polygon() only accepts non-negative values as '
            'length of a side' )
    return (sides * length**2) / (4 * tan(pi / sides ))
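def _heron_cross_check() -> None:
    # Editor's addition (hypothetical helper): for the 5-12-13 right triangle,
    # Heron's formula above must agree with the base-height formula.
    assert abs(area_triangle_three_sides(5, 12, 13) - area_triangle(5, 12)) < 1e-9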
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(F'Rectangle: {area_rectangle(10, 20) = }')
print(F'Square: {area_square(10) = }')
print(F'Triangle: {area_triangle(10, 10) = }')
print(F'Triangle: {area_triangle_three_sides(5, 12, 13) = }')
print(F'Parallelogram: {area_parallelogram(10, 20) = }')
print(F'Rhombus: {area_rhombus(10, 20) = }')
print(F'Trapezium: {area_trapezium(10, 20, 30) = }')
print(F'Circle: {area_circle(20) = }')
print(F'Ellipse: {area_ellipse(10, 20) = }')
print('''\nSurface Areas of various geometric shapes: \n''')
print(F'Cube: {surface_area_cube(20) = }')
print(F'Cuboid: {surface_area_cuboid(10, 20, 30) = }')
print(F'Sphere: {surface_area_sphere(20) = }')
print(F'Hemisphere: {surface_area_hemisphere(20) = }')
print(F'Cone: {surface_area_cone(10, 20) = }')
print(F'Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }')
print(F'Cylinder: {surface_area_cylinder(10, 20) = }')
print(F'Torus: {surface_area_torus(20, 10) = }')
print(F'Equilateral Triangle: {area_reg_polygon(3, 10) = }')
print(F'Square: {area_reg_polygon(4, 10) = }')
    print(F'Regular Pentagon: {area_reg_polygon(5, 10) = }')
| 323
| 0
|
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class __A ( nn.Module ):
lowerCAmelCase_ : int
lowerCAmelCase_ : int
lowerCAmelCase_ : float = 0.0
lowerCAmelCase_ : int = 1
lowerCAmelCase_ : int = 1
lowerCAmelCase_ : bool = True
lowerCAmelCase_ : bool = False
lowerCAmelCase_ : bool = False
lowerCAmelCase_ : bool = False
    lowerCAmelCase_ : jnp.dtype = jnp.float32
def lowercase__ ( self : Dict ):
lowerCAmelCase : Dict = []
lowerCAmelCase : Optional[Any] = []
for i in range(self.num_layers ):
lowerCAmelCase : Dict = self.in_channels if i == 0 else self.out_channels
lowerCAmelCase : Optional[int] = FlaxResnetBlockaD(
in_channels=UpperCAmelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCAmelCase_ )
lowerCAmelCase : Tuple = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = resnets
lowerCAmelCase : str = attentions
if self.add_downsample:
lowerCAmelCase : List[str] = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
    def __call__( self , hidden_states , temb , encoder_hidden_states , deterministic=True ):
lowerCAmelCase : Optional[Any] = ()
for resnet, attn in zip(self.resnets , self.attentions ):
lowerCAmelCase : List[Any] = resnet(UpperCAmelCase_ , UpperCAmelCase_ , deterministic=UpperCAmelCase_ )
lowerCAmelCase : Dict = attn(UpperCAmelCase_ , UpperCAmelCase_ , deterministic=UpperCAmelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
lowerCAmelCase : str = self.downsamplers_a(UpperCAmelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class __A ( nn.Module ):
lowerCAmelCase_ : int
lowerCAmelCase_ : int
lowerCAmelCase_ : float = 0.0
lowerCAmelCase_ : int = 1
lowerCAmelCase_ : bool = True
    lowerCAmelCase_ : jnp.dtype = jnp.float32
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : List[Any] = []
for i in range(self.num_layers ):
lowerCAmelCase : List[str] = self.in_channels if i == 0 else self.out_channels
lowerCAmelCase : Tuple = FlaxResnetBlockaD(
in_channels=UpperCAmelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = resnets
if self.add_downsample:
lowerCAmelCase : List[str] = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
    def __call__( self , hidden_states , temb , deterministic=True ):
lowerCAmelCase : Any = ()
for resnet in self.resnets:
lowerCAmelCase : Tuple = resnet(UpperCAmelCase_ , UpperCAmelCase_ , deterministic=UpperCAmelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
lowerCAmelCase : List[str] = self.downsamplers_a(UpperCAmelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class __A ( nn.Module ):
lowerCAmelCase_ : int
lowerCAmelCase_ : int
lowerCAmelCase_ : int
lowerCAmelCase_ : float = 0.0
lowerCAmelCase_ : int = 1
lowerCAmelCase_ : int = 1
lowerCAmelCase_ : bool = True
lowerCAmelCase_ : bool = False
lowerCAmelCase_ : bool = False
lowerCAmelCase_ : bool = False
    lowerCAmelCase_ : jnp.dtype = jnp.float32
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : str = []
lowerCAmelCase : List[str] = []
for i in range(self.num_layers ):
lowerCAmelCase : Optional[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels
lowerCAmelCase : Union[str, Any] = self.prev_output_channel if i == 0 else self.out_channels
lowerCAmelCase : List[str] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = resnets
lowerCAmelCase : Union[str, Any] = attentions
if self.add_upsample:
lowerCAmelCase : List[str] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
    def __call__( self , hidden_states , res_hidden_states_tuple , temb , encoder_hidden_states , deterministic=True ):
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
lowerCAmelCase : Tuple = res_hidden_states_tuple[-1]
lowerCAmelCase : List[Any] = res_hidden_states_tuple[:-1]
lowerCAmelCase : Optional[int] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
lowerCAmelCase : str = resnet(UpperCAmelCase_ , UpperCAmelCase_ , deterministic=UpperCAmelCase_ )
lowerCAmelCase : Dict = attn(UpperCAmelCase_ , UpperCAmelCase_ , deterministic=UpperCAmelCase_ )
if self.add_upsample:
lowerCAmelCase : Dict = self.upsamplers_a(UpperCAmelCase_ )
return hidden_states
class __A ( nn.Module ):
lowerCAmelCase_ : int
lowerCAmelCase_ : int
lowerCAmelCase_ : int
lowerCAmelCase_ : float = 0.0
lowerCAmelCase_ : int = 1
lowerCAmelCase_ : bool = True
    lowerCAmelCase_ : jnp.dtype = jnp.float32
def lowercase__ ( self : Tuple ):
lowerCAmelCase : int = []
for i in range(self.num_layers ):
lowerCAmelCase : Tuple = self.in_channels if (i == self.num_layers - 1) else self.out_channels
lowerCAmelCase : Union[str, Any] = self.prev_output_channel if i == 0 else self.out_channels
lowerCAmelCase : str = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = resnets
if self.add_upsample:
lowerCAmelCase : List[str] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
    def __call__( self , hidden_states , res_hidden_states_tuple , temb , deterministic=True ):
for resnet in self.resnets:
# pop res hidden states
lowerCAmelCase : Union[str, Any] = res_hidden_states_tuple[-1]
lowerCAmelCase : Tuple = res_hidden_states_tuple[:-1]
lowerCAmelCase : Optional[Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
lowerCAmelCase : Tuple = resnet(UpperCAmelCase_ , UpperCAmelCase_ , deterministic=UpperCAmelCase_ )
if self.add_upsample:
lowerCAmelCase : Tuple = self.upsamplers_a(UpperCAmelCase_ )
return hidden_states
class __A ( nn.Module ):
lowerCAmelCase_ : int
lowerCAmelCase_ : float = 0.0
lowerCAmelCase_ : int = 1
lowerCAmelCase_ : int = 1
lowerCAmelCase_ : bool = False
lowerCAmelCase_ : bool = False
    lowerCAmelCase_ : jnp.dtype = jnp.float32
def lowercase__ ( self : Any ):
# there is always at least one resnet
lowerCAmelCase : List[Any] = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
lowerCAmelCase : List[str] = []
for _ in range(self.num_layers ):
lowerCAmelCase : List[str] = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCAmelCase_ )
lowerCAmelCase : Dict = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = resnets
lowerCAmelCase : Any = attentions
    def __call__( self , hidden_states , temb , encoder_hidden_states , deterministic=True ):
lowerCAmelCase : List[str] = self.resnets[0](UpperCAmelCase_ , UpperCAmelCase_ )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
lowerCAmelCase : Optional[Any] = attn(UpperCAmelCase_ , UpperCAmelCase_ , deterministic=UpperCAmelCase_ )
lowerCAmelCase : List[str] = resnet(UpperCAmelCase_ , UpperCAmelCase_ , deterministic=UpperCAmelCase_ )
return hidden_states
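# Editor's sketch (not part of the original module): the up-blocks above pop
# U-Net skip connections from the end of `res_hidden_states_tuple` and
# concatenate them on the channel (last) axis before every resnet. A minimal
# standalone illustration of that bookkeeping, with a hypothetical name:
def _pop_skip_connection_sketch(hidden_states, res_hidden_states_tuple):
    res_hidden_states = res_hidden_states_tuple[-1]
    remaining = res_hidden_states_tuple[:-1]
    merged = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
    return merged, remaining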
| 353
|
from __future__ import annotations
from typing import Any
class __A :
def __init__( self : Optional[Any] , UpperCAmelCase_ : int ):
lowerCAmelCase : Tuple = num_of_nodes
lowerCAmelCase : list[list[int]] = []
lowerCAmelCase : dict[int, int] = {}
def lowercase__ ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
self.m_edges.append([u_node, v_node, weight] )
def lowercase__ ( self : Dict , UpperCAmelCase_ : int ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def lowercase__ ( self : Optional[int] , UpperCAmelCase_ : int ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
lowerCAmelCase : Dict = self.find_component(UpperCAmelCase_ )
def lowercase__ ( self : List[str] , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
if component_size[u_node] <= component_size[v_node]:
lowerCAmelCase : Optional[int] = v_node
component_size[v_node] += component_size[u_node]
self.set_component(UpperCAmelCase_ )
elif component_size[u_node] >= component_size[v_node]:
lowerCAmelCase : Union[str, Any] = self.find_component(UpperCAmelCase_ )
component_size[u_node] += component_size[v_node]
self.set_component(UpperCAmelCase_ )
def lowercase__ ( self : str ):
lowerCAmelCase : str = []
lowerCAmelCase : Tuple = 0
lowerCAmelCase : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
lowerCAmelCase : int = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Union[str, Any] = edge
lowerCAmelCase : Optional[int] = self.m_component[u]
lowerCAmelCase : str = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
lowerCAmelCase : str = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Any = edge
lowerCAmelCase : Optional[Any] = self.m_component[u]
lowerCAmelCase : Optional[Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n" )
num_of_components -= 1
lowerCAmelCase : Optional[Any] = [-1] * self.m_num_of_nodes
print(f"The total weight of the minimal spanning tree is: {mst_weight}" )
def SCREAMING_SNAKE_CASE__ ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 323
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__A : int = logging.get_logger(__name__)
__A : Dict = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : Optional[int] = "longformer"
    def __init__( self , attention_window: Union[List[int], int] = 512 , sep_token_id: int = 2 , pad_token_id: int = 1 , bos_token_id: int = 0 , eos_token_id: int = 2 , vocab_size: int = 30522 , hidden_size: int = 768 , num_hidden_layers: int = 12 , num_attention_heads: int = 12 , intermediate_size: int = 3072 , hidden_act: str = "gelu" , hidden_dropout_prob: float = 0.1 , attention_probs_dropout_prob: float = 0.1 , max_position_embeddings: int = 512 , type_vocab_size: int = 2 , initializer_range: float = 0.02 , layer_norm_eps: float = 1E-12 , onnx_export: bool = False , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class __A ( lowerCAmelCase ):
def __init__( self : Any , UpperCAmelCase_ : "PretrainedConfig" , UpperCAmelCase_ : str = "default" , UpperCAmelCase_ : "List[PatchingSpec]" = None ):
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : int = True
@property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
] )
@property
    def outputs( self ):
        outputs = super().outputs
        if self.task == "default":
            outputs['pooler_output'] = {0: 'batch'}
        return outputs
@property
    def atol_for_validation( self ):
        return 1E-4
@property
    def default_onnx_opset( self ):
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def lowercase__ ( self : List[Any] , UpperCAmelCase_ : "PreTrainedTokenizerBase" , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional[TensorType] = None , ):
lowerCAmelCase : int = super().generate_dummy_inputs(
preprocessor=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , seq_length=UpperCAmelCase_ , is_pair=UpperCAmelCase_ , framework=UpperCAmelCase_ )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
lowerCAmelCase : Optional[Any] = torch.zeros_like(inputs['input_ids'] )
# make every second token global
lowerCAmelCase : str = 1
return inputs
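# Editor's sketch (not in the original file): the dummy-input generation above
# marks every second token as a global-attention token. The same mask can be
# built standalone; the helper name is hypothetical.
def _global_attention_mask_sketch(batch_size: int, seq_length: int):
    import torch
    mask = torch.zeros((batch_size, seq_length), dtype=torch.int64)
    mask[:, ::2] = 1  # every second position attends globally
    return mask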
| 354
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : List[Any] = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[str] = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
__A : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
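# Editor's sketch (not part of the original file): `_LazyModule` defers the
# heavy torch imports above until an attribute is first accessed. A minimal
# standalone analogue of that pattern, with a hypothetical helper name:
import importlib
def _lazy_get(module_name: str, attr: str):
    # Import `module_name` only at the moment `attr` is requested.
    return getattr(importlib.import_module(module_name), attr)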
| 323
| 0
|
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
__A : Dict = False
__A : Optional[Any] = logging.get_logger(__name__)
__A : Tuple = '''ybelkada/fonts'''
def _check_torch_version() -> None:
'''simple docstring'''
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
'Pix2StructImageProcessor. Please upgrade torch.' )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    '''simple docstring'''
    requires_backends(torch_extract_patches, ['torch'] )
    _check_torch_version()
    image_tensor = image_tensor.unsqueeze(0 )
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width) )
    patches = patches.reshape(image_tensor.size(0 ), image_tensor.size(1 ), patch_height, patch_width, -1 )
    patches = patches.permute(0, 4, 2, 3, 1 ).reshape(
        image_tensor.size(2 ) // patch_height, image_tensor.size(3 ) // patch_width, image_tensor.size(1 ) * patch_height * patch_width, )
    return patches.unsqueeze(0 )
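def _torch_extract_patches_demo() -> None:
    # Editor's addition (hypothetical helper): a 3x32x32 image split into
    # 16x16 patches yields a (1, 2, 2, 16*16*3) tensor, i.e. a 2x2 grid of
    # flattened patches.
    image = torch.zeros(3, 32, 32)
    patches = torch_extract_patches(image, 16, 16)
    assert patches.shape == (1, 2, 2, 16 * 16 * 3)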
def render_text(text, text_size=36, text_color="black", background_color="white", left_padding=5, right_padding=5, top_padding=5, bottom_padding=5, font_bytes=None, font_path=None, ) -> Image.Image:
    '''simple docstring'''
    requires_backends(render_text, 'vision' )
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80 )
    lines = wrapper.wrap(text=text )
    wrapped_text = '\n'.join(lines )
    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes )
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download('ybelkada/fonts', 'Arial.TTF' )
    font = ImageFont.truetype(font, encoding='UTF-8', size=text_size )
    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new('RGB', (1, 1), background_color ) )
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font )
    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new('RGB', (image_width, image_height), background_color )
    draw = ImageDraw.Draw(image )
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font )
    return image
def render_header(image, header, **kwargs):
    '''simple docstring'''
    requires_backends(render_header, 'vision' )
    # Convert to PIL image if necessary
    image = to_pil_image(image )
    header_image = render_text(header, **kwargs )
    new_width = max(header_image.width, image.width )
    new_height = int(image.height * (new_width / image.width) )
    new_header_height = int(header_image.height * (new_width / header_image.width) )
    new_image = Image.new('RGB', (new_width, new_height + new_header_height), 'white' )
    new_image.paste(header_image.resize((new_width, new_header_height) ), (0, 0) )
    new_image.paste(image.resize((new_width, new_height) ), (0, new_header_height) )
    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image )
    if infer_channel_dimension_format(new_image ) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST )
    return new_image
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : str = ["flattened_patches"]
    def __init__( self , do_convert_rgb: bool = True , do_normalize: bool = True , patch_size: Dict[str, int] = None , max_patches: int = 2048 , is_vqa: bool = False , **kwargs , ):
        super().__init__(**kwargs )
        self.patch_size = patch_size if patch_size is not None else {'height': 16, 'width': 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches( self , image: np.ndarray , max_patches: int , patch_size: dict , **kwargs ):
requires_backends(self.extract_flattened_patches , 'torch' )
_check_torch_version()
# convert to torch
lowerCAmelCase : Dict = to_channel_dimension_format(UpperCAmelCase_ , ChannelDimension.FIRST )
lowerCAmelCase : Optional[Any] = torch.from_numpy(UpperCAmelCase_ )
        patch_height , patch_width = patch_size['height'], patch_size['width']
        image_height , image_width = get_image_size(image )
# maximize scale s.t.
lowerCAmelCase : str = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
lowerCAmelCase : List[str] = max(min(math.floor(scale * image_height / patch_height ) , UpperCAmelCase_ ) , 1 )
lowerCAmelCase : int = max(min(math.floor(scale * image_width / patch_width ) , UpperCAmelCase_ ) , 1 )
lowerCAmelCase : Dict = max(num_feasible_rows * patch_height , 1 )
lowerCAmelCase : str = max(num_feasible_cols * patch_width , 1 )
lowerCAmelCase : Tuple = torch.nn.functional.interpolate(
image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode='bilinear' , align_corners=UpperCAmelCase_ , antialias=UpperCAmelCase_ , ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
lowerCAmelCase : Any = torch_extract_patches(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = patches.shape
lowerCAmelCase : Union[str, Any] = patches_shape[1]
lowerCAmelCase : List[str] = patches_shape[2]
lowerCAmelCase : Optional[Any] = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
lowerCAmelCase : Tuple = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
lowerCAmelCase : Dict = torch.arange(UpperCAmelCase_ ).reshape([rows, 1] ).repeat(1 , UpperCAmelCase_ ).reshape([rows * columns, 1] )
lowerCAmelCase : Optional[Any] = torch.arange(UpperCAmelCase_ ).reshape([1, columns] ).repeat(UpperCAmelCase_ , 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
        row_ids = row_ids.to(torch.float32 )
        col_ids = col_ids.to(torch.float32 )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
lowerCAmelCase : Optional[Any] = torch.cat([row_ids, col_ids, patches] , -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
lowerCAmelCase : str = torch.nn.functional.pad(UpperCAmelCase_ , [0, 0, 0, max_patches - (rows * columns)] ).float()
lowerCAmelCase : Dict = to_numpy_array(UpperCAmelCase_ )
return result
    def normalize( self , image: np.ndarray , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs ):
        if image.dtype == np.uint8:
            image = image.astype(np.float32 )
        # take mean across the whole `image`
        mean = np.mean(image )
        std = np.std(image )
        adjusted_stddev = max(std , 1.0 / math.sqrt(np.prod(image.shape ) ) )
        return normalize(image , mean=mean , std=adjusted_stddev , **kwargs )
    def preprocess( self , images: ImageInput , header_text: Optional[str] = None , do_convert_rgb: bool = None , do_normalize: Optional[bool] = None , max_patches: Optional[int] = None , patch_size: Optional[Dict[str, int]] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: ChannelDimension = ChannelDimension.FIRST , **kwargs , ):
lowerCAmelCase : Any = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase : List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCAmelCase : Any = patch_size if patch_size is not None else self.patch_size
lowerCAmelCase : Union[str, Any] = max_patches if max_patches is not None else self.max_patches
lowerCAmelCase : Union[str, Any] = self.is_vqa
if kwargs.get('data_format' , UpperCAmelCase_ ) is not None:
raise ValueError('data_format is not an accepted input as the outputs are ' )
lowerCAmelCase : Optional[Any] = make_list_of_images(UpperCAmelCase_ )
if not valid_images(UpperCAmelCase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCAmelCase : Optional[int] = [convert_to_rgb(UpperCAmelCase_ ) for image in images]
# All transformations expect numpy arrays.
lowerCAmelCase : str = [to_numpy_array(UpperCAmelCase_ ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('A header text must be provided for VQA models.' )
lowerCAmelCase : Tuple = kwargs.pop('font_bytes' , UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = kwargs.pop('font_path' , UpperCAmelCase_ )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase : Dict = [header_text] * len(UpperCAmelCase_ )
lowerCAmelCase : List[str] = [
render_header(UpperCAmelCase_ , header_text[i] , font_bytes=UpperCAmelCase_ , font_path=UpperCAmelCase_ )
for i, image in enumerate(UpperCAmelCase_ )
]
if do_normalize:
lowerCAmelCase : Any = [self.normalize(image=UpperCAmelCase_ ) for image in images]
# convert to torch tensor and permute
lowerCAmelCase : List[Any] = [
self.extract_flattened_patches(image=UpperCAmelCase_ , max_patches=UpperCAmelCase_ , patch_size=UpperCAmelCase_ )
for image in images
]
# create attention mask in numpy
        attention_masks = [(image.sum(axis=-1 ) != 0).astype(np.float32 ) for image in images]
lowerCAmelCase : Dict = BatchFeature(
data={'flattened_patches': images, 'attention_mask': attention_masks} , tensor_type=UpperCAmelCase_ )
return encoded_outputs
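# Editor's sketch (not in the original file): `preprocess` above derives the
# attention mask from the flattened patches themselves, since any all-zero row
# is padding. The helper name is hypothetical.
def _patch_attention_mask_sketch(flattened_patches: np.ndarray) -> np.ndarray:
    return (flattened_patches.sum(axis=-1) != 0).astype(np.float32)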
| 355
|
import math
def solution(n: int = 100) -> int:
    '''simple docstring'''
    sum_of_squares = sum(i * i for i in range(1, n + 1 ) )
    square_of_sum = int(math.pow(sum(range(1, n + 1 ) ), 2 ) )
return square_of_sum - sum_of_squares
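def _solution_closed_form(n: int = 100) -> int:
    # Editor's addition (hypothetical helper): the same difference in closed
    # form, (n(n+1)/2)**2 - n(n+1)(2n+1)/6.
    return (n * (n + 1) // 2) ** 2 - n * (n + 1) * (2 * n + 1) // 6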
if __name__ == "__main__":
print(F'{solution() = }')
| 323
| 0
|
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __A :
    def __init__( self , parent , batch_size=14 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , rotary_dim=4 , num_hidden_layers=4 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.02 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = GPTJConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=False , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , input_ids , attention_mask ):
lowerCAmelCase : Optional[int] = 20
lowerCAmelCase : List[Any] = model_class_name(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = model.init_cache(input_ids.shape[0] , UpperCAmelCase_ )
lowerCAmelCase : List[Any] = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
lowerCAmelCase : Union[str, Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
lowerCAmelCase : Optional[int] = model(
input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , )
lowerCAmelCase : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
lowerCAmelCase : List[Any] = model(
input_ids[:, -1:] , attention_mask=UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=UpperCAmelCase_ , )
lowerCAmelCase : List[Any] = model(UpperCAmelCase_ )
lowerCAmelCase : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , input_ids , attention_mask ):
lowerCAmelCase : List[str] = 20
lowerCAmelCase : Any = model_class_name(UpperCAmelCase_ )
lowerCAmelCase : Tuple = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
lowerCAmelCase : Optional[Any] = model.init_cache(input_ids.shape[0] , UpperCAmelCase_ )
lowerCAmelCase : List[Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
lowerCAmelCase : Any = model(
input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , )
lowerCAmelCase : int = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
lowerCAmelCase : Dict = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , )
lowerCAmelCase : Union[str, Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )
lowerCAmelCase : List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
@require_flax
class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : Union[str, Any] = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
lowerCAmelCase_ : Tuple = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : Tuple = FlaxGPTJModelTester(self )
def lowercase__ ( self : int ):
for model_class_name in self.all_model_classes:
lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : Optional[Any] ):
for model_class_name in self.all_model_classes:
lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
@tooslow
def lowercase__ ( self : Optional[int] ):
        tokenizer = GPT2Tokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
lowerCAmelCase : int = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ )
lowerCAmelCase : Tuple = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
lowerCAmelCase : List[str] = False
lowerCAmelCase : List[str] = model.config.eos_token_id
lowerCAmelCase : Optional[Any] = jax.jit(model.generate )
lowerCAmelCase : List[str] = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
lowerCAmelCase : Optional[Any] = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
@is_pt_flax_cross_test
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
lowerCAmelCase : Optional[Any] = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowerCAmelCase : Any = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCAmelCase : Union[str, Any] = getattr(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : Dict = pt_inputs['input_ids'].shape
lowerCAmelCase : Tuple = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCAmelCase_ ):
lowerCAmelCase : Any = 0
lowerCAmelCase : List[Any] = 1
lowerCAmelCase : List[str] = 0
lowerCAmelCase : int = 1
lowerCAmelCase : Optional[Any] = pt_model_class(UpperCAmelCase_ ).eval()
                lowerCAmelCase : int = model_class(UpperCAmelCase_ , dtype=jnp.float32 )
lowerCAmelCase : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = fx_state
with torch.no_grad():
lowerCAmelCase : List[str] = pt_model(**UpperCAmelCase_ ).to_tuple()
lowerCAmelCase : Optional[int] = fx_model(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(UpperCAmelCase_ )
lowerCAmelCase : List[str] = model_class.from_pretrained(UpperCAmelCase_ , from_pt=UpperCAmelCase_ )
lowerCAmelCase : str = fx_model_loaded(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(
len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs['input_ids'].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs['attention_mask'][batch_idx, :start_index] = 0
                    pt_inputs['attention_mask'][batch_idx, start_index:] = 1
                    prepared_inputs_dict['attention_mask'][batch_idx, :start_index] = 0
                    prepared_inputs_dict['attention_mask'][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), 'Output lengths differ between Flax and PyTorch')
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), 'Output lengths differ between Flax and PyTorch'
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
@tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('EleutherAI/gpt-j-6B')
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial, given as coefficients in increasing degree, at x."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's rule: one pass, no explicit powers."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
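    # Illustrative cross-check (added): both functions evaluate the same
    # polynomial, so their results should agree up to floating-point rounding.
    assert abs(evaluate_poly(poly, x) - horner(poly, x)) < 1e-6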
import mpmath # for roots of unity
import numpy as np
class __A :
    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)
        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]
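    # Note (added): __dft above is an iterative radix-2 Cooley-Tukey transform.
    # Each pass halves next_ncol, combining pairs of half-size transforms with
    # successive powers of the primitive root until one full-length spectrum remains.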
    def __multiply(self):
        dft_a = self.__dft('A')
        dft_b = self.__dft('B')
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
        next_ncol = 2
while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
            inverce_c = new_inverse_c
next_ncol *= 2
# Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
    def __str__(self):
        a = 'A = ' + ' + '.join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = 'B = ' + ' + '.join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = 'A*B = ' + ' + '.join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
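# Illustrative usage (added sketch, not part of the unit tests): multiplying
# A(x) = 1 + 2x + 3x^2 by B(x) = 3 + 4x should give 3 + 10x + 17x^2 + 12x^3:
#
#     fft = __A(poly_a=[1, 2, 3], poly_b=[3, 4])
#     print(fft)  # prints A, B and A*B as formatted polynomials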
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {'shortest_edge': 20}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
def lowercase__ ( self : Optional[int] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __A(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_center_crop'))
        self.assertTrue(hasattr(image_processing, 'crop_size'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 20})
        self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'shortest_edge': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
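    # Note (added): ALBERT shares parameters across layers and factorizes its
    # embeddings, so hidden_size (36 here) can legitimately differ from
    # embedding_size (16); these small test dimensions exercise that projection.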
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class __A(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict['sentence_order_label'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)
    def test_config(self):
self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ['absolute', 'relative_key', 'relative_key_query']:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained('albert-base-v2')
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
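        # Note (added): pinning a 3x3 slice of the hidden states with atol=1e-4
        # leaves headroom for kernel-level nondeterminism across hardware while
        # still catching real regressions in the checkpoint or the modeling code.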
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    """Build (old_key, new_key) pairs mapping MSN checkpoint names to transformers names."""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('module.cls_token', 'vit.embeddings.cls_token'),
('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('module.pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('module.norm.weight', 'layernorm.weight'),
('module.norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
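# Note (added): timm-style checkpoints store query/key/value as one fused qkv
# projection of shape (3 * hidden_size, hidden_size); the three slices above split
# it back into the separate q/k/v projections that the transformers ViT code expects.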
def remove_classification_head_(state_dict):
    """Drop classification-head weights, which the base model does not use."""
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    """Drop the MSN projection-head weights, which have no counterpart in ViTMSNModel."""
    ignore_keys = [
'module.fc.fc1.weight',
'module.fc.fc1.bias',
'module.fc.bn1.weight',
'module.fc.bn1.bias',
'module.fc.bn1.running_mean',
'module.fc.bn1.running_var',
'module.fc.bn1.num_batches_tracked',
'module.fc.fc2.weight',
'module.fc.fc2.bias',
'module.fc.bn2.weight',
'module.fc.bn2.bias',
'module.fc.bn2.running_mean',
'module.fc.bn2.running_var',
'module.fc.bn2.num_batches_tracked',
'module.fc.fc3.weight',
'module.fc.fc3.bias',
]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
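# For example (illustrative): rename_key(state_dict, 'module.cls_token',
# 'vit.embeddings.cls_token') moves a tensor from its timm-style key to the
# transformers-style key produced by create_rename_keys above.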
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1_000

    repo_id = 'datasets/huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['target_encoder']

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors='pt')

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__A : List[str] = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
"""simple docstring"""
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = '''\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
'''
_DESCRIPTION = '''\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project\'s README at https://github.com/google-research/bleurt#readme for more information.
'''
_KWARGS_DESCRIPTION = '''
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
\'scores\': List of scores.
Examples:
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> bleurt = datasets.load_metric("bleurt")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results["scores"]])
[1.03, 1.04]
'''
CHECKPOINT_URLS = {
'''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''',
'''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''',
'''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''',
'''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''',
'''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''',
'''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''',
'''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''',
'''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''',
'''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''',
'''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/google-research/bleurt' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/bleurt'] , reference_urls=['https://github.com/google-research/bleurt', 'https://arxiv.org/abs/2004.04696'] , )
    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                'Using default BLEURT-Base checkpoint for sequence maximum length 128. '
                'You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').'
            )
            checkpoint_name = 'bleurt-base-128'
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
def hamming_distance(string_a: str, string_b: str) -> int:
    """Return the number of positions at which two equal-length strings differ."""
    if len(string_a) != len(string_b):
        raise ValueError('String lengths must match!')
    count = 0
    for char_a, char_b in zip(string_a, string_b):
        if char_a != char_b:
            count += 1
    return count
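# Example (added for illustration): "karolin" and "kathrin" differ at three
# positions, so hamming_distance("karolin", "kathrin") == 3.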
if __name__ == "__main__":
import doctest
doctest.testmod()
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None
class __A ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
"""simple docstring"""
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                'is not serializable neither with `pickle` nor with `dill`. Instead you can surround '
                'the device with `str()` to get its string identifier that will be internally mapped '
                'to the actual `jaxlib.xla_extension.Device`.'
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str() -> dict:
        import jax

        return {str(device): device for device in jax.devices()}
    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column
    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {'dtype': jnp.int64}
            else:
                default_dtype = {'dtype': jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {'dtype': jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, '__array__') and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)
    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
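# Illustrative usage (added sketch): this formatter is what backs
# `Dataset.with_format("jax")` in `datasets`. Assuming a dataset `ds`:
#
#     ds = ds.with_format("jax", device=str(jax.devices()[0]))
#     ds[0]  # rows come back as jax.Array values placed on the requested device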
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=384,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=128,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = 'temp_engine/bert-fp32.engine'
if args.fp16:
    engine_name = 'temp_engine/bert-fp16.engine'
if args.int8:
    engine_name = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
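# Note (added): serializing the built engine to disk means later runs can skip the
# slow builder step and simply deserialize the plan file, which is exactly what the
# trt.Runtime block further below does.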
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs['input_ids'], dtype=np.int32)
    attention_mask = np.asarray(inputs['attention_mask'], dtype=np.int32)
    token_type_ids = np.asarray(inputs['token_type_ids'], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    return outputs, infer_time
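# Note (added): the host-to-device copies, the kernel launch and the device-to-host
# copies are all enqueued on one CUDA stream, so stream.synchronize() makes the
# timed interval cover the complete transfer + inference round trip.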
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets['validation'].column_names

question_column_name = 'question' if 'question' in column_names else column_names[0]
context_column_name = 'context' if 'context' in column_names else column_names[1]
answer_column_name = 'answers' if 'answers' in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == 'right'

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
        f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
    )

max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Strip left whitespace from the questions so it doesn't eat into the context budget.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation='only_second' if pad_on_right else 'only_first',
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding='max_length',
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop('overflow_to_sample_mapping')

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples['example_id'] = []

    for i in range(len(tokenized_examples['input_ids'])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples['example_id'].append(examples['id'][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples['offset_mapping'][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples['offset_mapping'][i])
        ]

    return tokenized_examples
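# Note (added): with a doc_stride overlap one long example can yield several
# features; `example_id` plus the context-masked `offset_mapping` are what allow
# postprocess_qa_predictions to map span logits back to character positions in
# the original context.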
eval_examples = raw_datasets['validation']
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='Running tokenizer on validation dataset',
)

data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase="eval" ) -> int:
'''simple docstring'''
lowerCAmelCase : str = postprocess_qa_predictions(
examples=_UpperCAmelCase, features=_UpperCAmelCase, predictions=_UpperCAmelCase, version_2_with_negative=args.version_2_with_negative, n_best_size=args.n_best_size, max_answer_length=args.max_answer_length, null_score_diff_threshold=args.null_score_diff_threshold, output_dir=args.output_dir, prefix=_UpperCAmelCase, )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
lowerCAmelCase : Union[str, Any] = [
{'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
]
else:
lowerCAmelCase : List[Any] = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]
lowerCAmelCase : Optional[Any] = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=_UpperCAmelCase, label_ids=_UpperCAmelCase )
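# For reference, a hedged sketch (ids and strings invented) of the
# prediction/reference format that the squad metric loaded below expects.
def _squad_metric_format_sketch():
    predictions = [{'id': '001', 'prediction_text': 'Denver Broncos'}]
    references = [{'id': '001', 'answers': {'text': ['Denver Broncos'], 'answer_start': [177]}}]
    return predictions, references  # pass to metric.compute(predictions=..., references=...)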
__A : List[Any] = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
return trt.volume(engine.get_binding_shape(_UpperCAmelCase ) ) * engine.get_binding_dtype(_UpperCAmelCase ).itemsize
# Allocate device memory for inputs and outputs.
__A : List[str] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
__A : Optional[int] = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
__A : int = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
__A : Tuple = cuda.mem_alloc(h_outputa.nbytes)
__A : Tuple = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__A : Union[str, Any] = cuda.Stream()
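    # The `model_infer` helper called in the loop below is not defined in this
    # fragment. This is a hedged sketch of what such a pycuda + TensorRT helper
    # typically does; the input names and binding order are assumptions, not
    # the original implementation.
    def _model_infer_sketch(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
        import time

        import numpy as np
        import pycuda.driver as cuda  # assumes pycuda.autoinit ran at startup

        # Copy each host input to its device buffer; the names must match the
        # engine's input bindings in order.
        for d_input, name in zip(d_inputs, ('input_ids', 'attention_mask', 'token_type_ids')):
            cuda.memcpy_htod_async(d_input, np.ascontiguousarray(batch[name].numpy().ravel()), stream)
        start = time.time()
        # Enqueue inference; bindings are device pointers in engine order.
        bindings = [int(p) for p in (*d_inputs, d_output0, d_output1)]
        context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
        # Copy start/end logits back into the pinned host buffers and wait.
        cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
        cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
        stream.synchronize()
        return (h_output0, h_output1), time.time() - start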
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(F' Num examples = {len(eval_dataset)}')
logger.info(F' Batch size = {args.per_device_eval_batch_size}')
__A : Union[str, Any] = 0.0
__A : Optional[Any] = 0
__A : Optional[Any] = timeit.default_timer()
__A : Optional[int] = None
for step, batch in enumerate(eval_dataloader):
__A , __A : str = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
__A , __A : str = outputs
__A : Optional[Any] = torch.tensor(start_logits)
__A : Any = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
__A : List[Any] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
__A : int = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
__A : Union[str, Any] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__A : int = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
__A : str = nested_truncate(all_preds, len(eval_dataset))
__A : Any = timeit.default_timer() - start_time
logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 1000 / niter))
logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 1000))
    logger.info('''Total Number of Inferences = %d''', niter)
__A : List[Any] = post_processing_function(eval_examples, eval_dataset, all_preds)
__A : str = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F'Evaluation metrics: {eval_metric}')
| 323
| 0
|
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase : Optional[int] = os.path.join(args.tf_model_dir, 'parameters.json' )
lowerCAmelCase : str = json.loads(open(_UpperCAmelCase ).read() )
if not params:
raise ValueError(
f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file." )
if not args.output.endswith('.pt' ):
lowerCAmelCase : Dict = args.output + '.pt'
lowerCAmelCase : Optional[int] = OrderedDict()
with tf.device('/CPU:0' ):
lowerCAmelCase : str = tf.train.load_checkpoint(args.tf_model_dir )
lowerCAmelCase : Optional[int] = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
lowerCAmelCase : int = reader.get_tensor(_UpperCAmelCase ).astype(np.floataa )
if key_name.endswith('/adam_m' ) or key_name.endswith('/adam_v' ):
continue
if key_name.startswith('pasts/' ):
if key_name.startswith('pasts/mlp' ):
lowerCAmelCase : Optional[Any] = int(key_name[9] )
elif key_name.startswith('pasts/out' ):
lowerCAmelCase : Optional[Any] = 8
                lowerCAmelCase : Any = 'model.sqout.%d.weight' % (player * 2)  # enters an nn.Sequential with Tanh, so 2 at a time
lowerCAmelCase : Union[str, Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase : List[str] = torch.tensor(_UpperCAmelCase )
elif key_name.startswith('model/moe' ):
lowerCAmelCase : Optional[Any] = int(key_name[9:].split('/' )[0] )
if key_name.endswith('/switch_gating/kernel' ):
lowerCAmelCase : Optional[int] = 'model.blocks.%d.feed_forward.mlp.router.classifier.weight' % player
lowerCAmelCase : Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase : Optional[Any] = torch.tensor(_UpperCAmelCase )
elif key_name.endswith('/softmlp/kernel' ):
lowerCAmelCase : Any = 'model.blocks.%d.feed_forward.soft_bypass_mlp.weight' % player
lowerCAmelCase : Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase : Union[str, Any] = torch.tensor(_UpperCAmelCase )
elif key_name.endswith('/wo/kernel' ) or key_name.endswith('/wi/kernel' ):
lowerCAmelCase : List[Any] = key_name[-9:-7]
for i in range(16 ):
lowerCAmelCase : str = 'model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight' % (player, i, nlayer)
lowerCAmelCase : str = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
lowerCAmelCase : int = torch.tensor(_UpperCAmelCase )
elif key_name.startswith('model/mlp' ):
lowerCAmelCase : List[str] = int(key_name[9:].split('/' )[0] )
if key_name.endswith('/p1/kernel' ):
lowerCAmelCase : Optional[Any] = 'model.blocks.%d.feed_forward.mlp.wi.weight' % player
lowerCAmelCase : Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase : str = torch.tensor(_UpperCAmelCase )
elif key_name.endswith('/p1/bias' ):
lowerCAmelCase : int = 'model.blocks.%d.feed_forward.mlp.wi.bias' % player
lowerCAmelCase : Tuple = vnp.copy() # same because it is one dimensional
lowerCAmelCase : Tuple = torch.tensor(_UpperCAmelCase )
elif key_name.endswith('/p2/kernel' ):
lowerCAmelCase : int = 'model.blocks.%d.feed_forward.mlp.wo.weight' % player
lowerCAmelCase : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase : Tuple = torch.tensor(_UpperCAmelCase )
elif key_name.endswith('/p2/bias' ):
lowerCAmelCase : Tuple = 'model.blocks.%d.feed_forward.mlp.wo.bias' % player
lowerCAmelCase : Dict = vnp.copy() # same because it is one dimensional
lowerCAmelCase : str = torch.tensor(_UpperCAmelCase )
elif key_name.startswith('model/ln' ):
lowerCAmelCase : Optional[Any] = int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
lowerCAmelCase : Optional[Any] = 'model.blocks.%d.feed_forward.norm.bias' % player
lowerCAmelCase : Union[str, Any] = vnp.copy() # same because it is one dimensional
lowerCAmelCase : Union[str, Any] = torch.tensor(_UpperCAmelCase )
elif key_name.endswith('/g' ):
lowerCAmelCase : Dict = 'model.blocks.%d.feed_forward.norm.weight' % player
lowerCAmelCase : Optional[Any] = vnp.copy() # same because it is one dimensional
lowerCAmelCase : Union[str, Any] = torch.tensor(_UpperCAmelCase )
elif key_name.startswith('model/att' ):
lowerCAmelCase : Union[str, Any] = int(key_name[9:].split('/' )[0] )
if key_name.endswith('/qkv/kernel' ):
lowerCAmelCase : List[str] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
lowerCAmelCase : Optional[int] = state[:, 0, :, :]
lowerCAmelCase : Dict = state[:, 1, :, :]
lowerCAmelCase : Union[str, Any] = state[:, 2, :, :]
lowerCAmelCase : Optional[Any] = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase : Dict = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase : str = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase : Any = 'model.blocks.%d.self_attn.self_attn.q_proj.weight' % player
lowerCAmelCase : str = torch.tensor(_UpperCAmelCase )
lowerCAmelCase : List[str] = 'model.blocks.%d.self_attn.self_attn.k_proj.weight' % player
lowerCAmelCase : str = torch.tensor(_UpperCAmelCase )
lowerCAmelCase : str = 'model.blocks.%d.self_attn.self_attn.v_proj.weight' % player
lowerCAmelCase : Union[str, Any] = torch.tensor(_UpperCAmelCase )
elif key_name.endswith('/o/kernel' ):
lowerCAmelCase : Tuple = 'model.blocks.%d.self_attn.self_attn.out_proj.weight' % player
lowerCAmelCase : List[str] = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase : List[str] = torch.tensor(_UpperCAmelCase )
elif key_name.startswith('model/an' ):
lowerCAmelCase : Tuple = int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
lowerCAmelCase : int = 'model.blocks.%d.self_attn.norm.bias' % player
lowerCAmelCase : List[str] = vnp.copy() # same because it is one dimensional
lowerCAmelCase : Union[str, Any] = torch.tensor(_UpperCAmelCase )
elif key_name.endswith('/g' ):
lowerCAmelCase : Tuple = 'model.blocks.%d.self_attn.norm.weight' % player
lowerCAmelCase : Dict = vnp.copy() # same because it is one dimensional
lowerCAmelCase : str = torch.tensor(_UpperCAmelCase )
elif (
key_name.startswith('model/wte' )
or key_name.startswith('model/wpe' )
or key_name.startswith('model/ete' )
):
lowerCAmelCase : Tuple = {'wte': 'embed_tokens', 'wpe': 'position_embeddings', 'ete': 'extra_position_embeddings'}[
key_name[-3:]
]
lowerCAmelCase : int = 'model.%s.weight' % nlayer
lowerCAmelCase : Optional[Any] = vnp.copy() # same in embedded
lowerCAmelCase : Tuple = torch.tensor(_UpperCAmelCase )
if key_name.startswith('model/wte' ):
lowerCAmelCase : Optional[Any] = 'lm_head.weight'
lowerCAmelCase : Optional[int] = vnp.copy() # same in embedded
lowerCAmelCase : Dict = torch.tensor(_UpperCAmelCase )
elif key_name.startswith('model/wob' ):
lowerCAmelCase : str = 'final_logits_bias'
lowerCAmelCase : int = vnp.copy() # same in embedded
lowerCAmelCase : Union[str, Any] = state.reshape((1, -1) )
lowerCAmelCase : List[str] = torch.tensor(_UpperCAmelCase )
elif key_name == "model/dense/kernel":
lowerCAmelCase : Optional[int] = 'model.last_project.weight'
lowerCAmelCase : List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase : Optional[Any] = torch.tensor(_UpperCAmelCase )
elif key_name == "model/dense_1/bias":
lowerCAmelCase : Optional[int] = 'model.last_project.bias'
lowerCAmelCase : List[Any] = vnp.copy() # same because it is one dimensional
lowerCAmelCase : Union[str, Any] = torch.tensor(_UpperCAmelCase )
torch.save(_UpperCAmelCase, args.output )
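# Hedged aside (not part of the converter): TF Dense kernels are stored as
# [in_features, out_features] while torch.nn.Linear.weight is
# [out_features, in_features], which is why the branches above repeatedly
# apply transpose([1, 0]) before wrapping the array with torch.tensor.
def _kernel_transpose_sketch():
    tf_kernel = np.random.rand(16, 32).astype(np.float32)  # [in, out]
    pt_weight = torch.tensor(tf_kernel.transpose([1, 0]).copy())  # [out, in]
    x = torch.ones(1, 16)
    # torch computes x @ weight.T, which matches TF's x @ kernel.
    assert torch.allclose(x @ pt_weight.T, torch.tensor(np.ones((1, 16), dtype=np.float32) @ tf_kernel))
    return pt_weight.shape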
if __name__ == "__main__":
__A : List[str] = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
__A : Optional[int] = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 361
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A : Any = logging.get_logger(__name__)
__A : Union[str, Any] = {
'''shi-labs/dinat-mini-in1k-224''': '''https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json''',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class __A ( lowerCAmelCase , lowerCAmelCase ):
lowerCAmelCase_ : Optional[Any] = "dinat"
lowerCAmelCase_ : Dict = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]=4 , UpperCAmelCase_ : Tuple=3 , UpperCAmelCase_ : Optional[Any]=64 , UpperCAmelCase_ : List[Any]=[3, 4, 6, 5] , UpperCAmelCase_ : Dict=[2, 4, 8, 16] , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : Dict=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , UpperCAmelCase_ : int=3.0 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : List[Any]=0.02 , UpperCAmelCase_ : List[str]=1E-5 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Optional[int]=None , **UpperCAmelCase_ : Union[str, Any] , ):
super().__init__(**UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = patch_size
lowerCAmelCase : Optional[Any] = num_channels
lowerCAmelCase : str = embed_dim
lowerCAmelCase : Any = depths
lowerCAmelCase : List[Any] = len(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = num_heads
lowerCAmelCase : Tuple = kernel_size
lowerCAmelCase : List[str] = dilations
lowerCAmelCase : Any = mlp_ratio
lowerCAmelCase : Optional[int] = qkv_bias
lowerCAmelCase : int = hidden_dropout_prob
lowerCAmelCase : str = attention_probs_dropout_prob
lowerCAmelCase : Union[str, Any] = drop_path_rate
lowerCAmelCase : Any = hidden_act
lowerCAmelCase : Union[str, Any] = layer_norm_eps
lowerCAmelCase : Optional[int] = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase : Union[str, Any] = int(embed_dim * 2 ** (len(UpperCAmelCase_ ) - 1) )
lowerCAmelCase : int = layer_scale_init_value
lowerCAmelCase : Optional[Any] = ['stem'] + [f"stage{idx}" for idx in range(1 , len(UpperCAmelCase_ ) + 1 )]
lowerCAmelCase , lowerCAmelCase : Tuple = get_aligned_output_features_output_indices(
out_features=UpperCAmelCase_ , out_indices=UpperCAmelCase_ , stage_names=self.stage_names )
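# Illustrative check (not part of the config class): with the defaults above,
# embed_dim=64 and depths=[3, 4, 6, 5] give four stages, so the channel
# dimension after the last stage, the hidden_size set in __init__, is
# 64 * 2 ** (4 - 1) = 512.
def _dinat_hidden_size_sketch(embed_dim=64, depths=(3, 4, 6, 5)):
    return int(embed_dim * 2 ** (len(depths) - 1))  # 512 for the defaults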
| 323
| 0
|
import flax.linen as nn
import jax
import jax.numpy as jnp
class __A ( nn.Module ):
lowerCAmelCase_ : int
lowerCAmelCase_ : jnp.dtype = jnp.floataa
def lowercase__ ( self : int ):
lowerCAmelCase : List[Any] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : Optional[Any] , UpperCAmelCase_ : Optional[int] ):
lowerCAmelCase : int = hidden_states.shape
lowerCAmelCase : Optional[Any] = jax.image.resize(
UpperCAmelCase_ , shape=(batch, height * 2, width * 2, channels) , method='nearest' , )
lowerCAmelCase : List[Any] = self.conv(UpperCAmelCase_ )
return hidden_states
class __A ( nn.Module ):
lowerCAmelCase_ : int
lowerCAmelCase_ : jnp.dtype = jnp.floataa
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : Any = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : Dict , UpperCAmelCase_ : Optional[int] ):
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
lowerCAmelCase : Any = self.conv(UpperCAmelCase_ )
return hidden_states
class __A ( nn.Module ):
lowerCAmelCase_ : int
lowerCAmelCase_ : int = None
lowerCAmelCase_ : float = 0.0
lowerCAmelCase_ : bool = None
lowerCAmelCase_ : jnp.dtype = jnp.floataa
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Optional[Any] = self.in_channels if self.out_channels is None else self.out_channels
lowerCAmelCase : Any = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
lowerCAmelCase : Optional[int] = nn.Conv(
UpperCAmelCase_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowerCAmelCase : Optional[Any] = nn.Dense(UpperCAmelCase_ , dtype=self.dtype )
lowerCAmelCase : Dict = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
lowerCAmelCase : Tuple = nn.Dropout(self.dropout_prob )
lowerCAmelCase : Any = nn.Conv(
UpperCAmelCase_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowerCAmelCase : Optional[Any] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
lowerCAmelCase : Optional[Any] = None
if use_nin_shortcut:
lowerCAmelCase : List[Any] = nn.Conv(
UpperCAmelCase_ , kernel_size=(1, 1) , strides=(1, 1) , padding='VALID' , dtype=self.dtype , )
def __call__( self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict=True ):
lowerCAmelCase : Union[str, Any] = hidden_states
lowerCAmelCase : str = self.norma(UpperCAmelCase_ )
lowerCAmelCase : str = nn.swish(UpperCAmelCase_ )
lowerCAmelCase : Tuple = self.conva(UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = self.time_emb_proj(nn.swish(UpperCAmelCase_ ) )
lowerCAmelCase : str = jnp.expand_dims(jnp.expand_dims(UpperCAmelCase_ , 1 ) , 1 )
lowerCAmelCase : int = hidden_states + temb
lowerCAmelCase : Optional[Any] = self.norma(UpperCAmelCase_ )
lowerCAmelCase : Tuple = nn.swish(UpperCAmelCase_ )
lowerCAmelCase : Optional[int] = self.dropout(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = self.conva(UpperCAmelCase_ )
if self.conv_shortcut is not None:
lowerCAmelCase : Any = self.conv_shortcut(UpperCAmelCase_ )
return hidden_states + residual
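# Standalone sketch (not part of this file) of the nearest-neighbour 2x
# upsampling used in the first block above; shapes follow Flax's NHWC layout.
def _upsample_sketch():
    hidden_states = jnp.ones((1, 8, 8, 32))  # (batch, height, width, channels)
    batch, height, width, channels = hidden_states.shape
    upsampled = jax.image.resize(
        hidden_states, shape=(batch, height * 2, width * 2, channels), method='nearest'
    )
    return upsampled.shape  # (1, 16, 16, 32)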
| 362
|
from manim import *
class __A ( lowerCAmelCase ):
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Dict = Rectangle(height=0.5 , width=0.5 )
lowerCAmelCase : Any = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCAmelCase : List[str] = Rectangle(height=0.25 , width=0.25 )
lowerCAmelCase : List[Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase : Tuple = [mem.copy() for i in range(6 )]
lowerCAmelCase : int = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : Dict = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : int = VGroup(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : str = Text('CPU' , font_size=24 )
lowerCAmelCase : Union[str, Any] = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCAmelCase_ )
lowerCAmelCase : int = [mem.copy() for i in range(4 )]
lowerCAmelCase : Union[str, Any] = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : int = Text('GPU' , font_size=24 )
lowerCAmelCase : Tuple = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
gpu.move_to([-1, -1, 0] )
self.add(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase : Tuple = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : List[str] = Text('Model' , font_size=24 )
lowerCAmelCase : Union[str, Any] = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
model.move_to([3, -1.0, 0] )
self.add(UpperCAmelCase_ )
lowerCAmelCase : Any = []
lowerCAmelCase : Dict = []
for i, rect in enumerate(UpperCAmelCase_ ):
lowerCAmelCase : Optional[Any] = fill.copy().set_fill(UpperCAmelCase_ , opacity=0.8 )
target.move_to(UpperCAmelCase_ )
model_arr.append(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(UpperCAmelCase_ )
self.add(*UpperCAmelCase_ , *UpperCAmelCase_ )
lowerCAmelCase : Dict = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase : Union[str, Any] = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase : Tuple = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : int = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : Tuple = VGroup(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : Union[str, Any] = Text('Disk' , font_size=24 )
lowerCAmelCase : Optional[Any] = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
disk.move_to([-4, -1.25, 0] )
self.add(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : List[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase : Optional[int] = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : Dict = MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(UpperCAmelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(UpperCAmelCase_ )
lowerCAmelCase : str = MarkupText(
f"Now watch as an input is passed through the model\nand how the memory is utilized and handled." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase_ ) )
lowerCAmelCase : Optional[Any] = Square(0.3 )
input.set_fill(UpperCAmelCase_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , UpperCAmelCase_ , buff=0.5 )
self.play(Write(UpperCAmelCase_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=UpperCAmelCase_ , buff=0.02 )
self.play(MoveToTarget(UpperCAmelCase_ ) )
self.play(FadeOut(UpperCAmelCase_ ) )
lowerCAmelCase : List[Any] = Arrow(start=UpperCAmelCase_ , end=UpperCAmelCase_ , color=UpperCAmelCase_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , UpperCAmelCase_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
lowerCAmelCase : int = MarkupText(
f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase_ , run_time=3 ) )
lowerCAmelCase : Optional[Any] = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(UpperCAmelCase_ ) , Circumscribe(model_arr[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(model_cpu_arr[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
lowerCAmelCase : Any = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , UpperCAmelCase_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
lowerCAmelCase : int = AnimationGroup(
FadeOut(UpperCAmelCase_ , run_time=0.5 ) , MoveToTarget(UpperCAmelCase_ , run_time=0.5 ) , FadeIn(UpperCAmelCase_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(UpperCAmelCase_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
lowerCAmelCase : List[str] = 0.7
self.play(
Circumscribe(model_arr[i] , **UpperCAmelCase_ ) , Circumscribe(cpu_left_col_base[i] , **UpperCAmelCase_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(model_arr[i + 1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(cpu_left_col_base[-1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
lowerCAmelCase : int = a_c
lowerCAmelCase : Union[str, Any] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(UpperCAmelCase_ ) , FadeOut(UpperCAmelCase_ , run_time=0.5 ) , )
lowerCAmelCase : int = MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase_ , run_time=3 ) , MoveToTarget(UpperCAmelCase_ ) )
self.wait()
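# Hedged sketch of the accelerate behaviour this scene animates; the model is
# a stand-in and the device is an assumption about the runtime setup.
def _cpu_offload_sketch():
    import torch
    from accelerate import cpu_offload

    model = torch.nn.Sequential(*[torch.nn.Linear(1024, 1024) for _ in range(6)])
    cpu_offload(model, execution_device=torch.device('cuda:0'))
    # Hooks now move each layer's weights from CPU to GPU just before its
    # forward pass and back afterwards, the choreography drawn above.
    return model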
| 323
| 0
|
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__A : Optional[Any] = logging.get_logger(__name__)
__A : Any = {
'''vocab_file''': '''vocab.json''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
'''merges_file''': '''merges.txt''',
}
__A : List[Any] = {
'''vocab_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'''
),
},
'''tokenizer_config_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'''
),
},
'''merges_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'''
),
},
}
__A : Any = '''</w>'''
__A : Union[str, Any] = '''@@ '''
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Any:
'''simple docstring'''
lowerCAmelCase : Any = set()
lowerCAmelCase : List[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCAmelCase : Any = char
return pairs
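# Illustration (not in the original file): for the word "hello", the helper
# above (named SCREAMING_SNAKE_CASE__ in this dump) returns the adjacent
# symbol pairs {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}.
def _get_pairs_demo():
    return SCREAMING_SNAKE_CASE__('hello')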
# Speech2Text2 has no max input length
__A : str = {'''facebook/s2t-wav2vec2-large-en-de''': 1024}
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : int = VOCAB_FILES_NAMES
lowerCAmelCase_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : Any = ["input_ids", "attention_mask"]
def __init__( self : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int]="<s>" , UpperCAmelCase_ : int="<pad>" , UpperCAmelCase_ : List[str]="</s>" , UpperCAmelCase_ : List[str]="<unk>" , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : str=None , **UpperCAmelCase_ : Union[str, Any] , ):
super().__init__(
unk_token=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , **UpperCAmelCase_ , )
lowerCAmelCase : Tuple = do_lower_case
with open(UpperCAmelCase_ , encoding='utf-8' ) as vocab_handle:
lowerCAmelCase : List[str] = json.load(UpperCAmelCase_ )
lowerCAmelCase : Dict = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding." )
lowerCAmelCase : str = None
lowerCAmelCase : Tuple = None
else:
with open(UpperCAmelCase_ , encoding='utf-8' ) as merges_handle:
lowerCAmelCase : Tuple = merges_handle.read().split('\n' )[:-1]
lowerCAmelCase : Dict = [tuple(merge.split()[:2] ) for merge in merges]
lowerCAmelCase : List[Any] = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
lowerCAmelCase : Optional[int] = {}
@property
def lowercase__ ( self : str ):
return len(self.decoder )
def lowercase__ ( self : int ):
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase__ ( self : Optional[Any] , UpperCAmelCase_ : Optional[int] ):
lowerCAmelCase : Any = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
lowerCAmelCase : int = get_pairs(UpperCAmelCase_ )
if not pairs:
return token
while True:
lowerCAmelCase : List[str] = min(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : self.bpe_ranks.get(UpperCAmelCase_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase : Optional[Any] = bigram
lowerCAmelCase : List[str] = []
lowerCAmelCase : Tuple = 0
while i < len(UpperCAmelCase_ ):
try:
lowerCAmelCase : List[str] = word.index(UpperCAmelCase_ , UpperCAmelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase : Dict = j
if word[i] == first and i < len(UpperCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase : Optional[Any] = tuple(UpperCAmelCase_ )
lowerCAmelCase : Tuple = new_word
if len(UpperCAmelCase_ ) == 1:
break
else:
lowerCAmelCase : Dict = get_pairs(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = ' '.join(UpperCAmelCase_ )
if word == "\n " + BPE_TOKEN_MERGES:
lowerCAmelCase : Dict = '\n' + BPE_TOKEN_MERGES
if word.endswith(UpperCAmelCase_ ):
lowerCAmelCase : Optional[int] = word.replace(UpperCAmelCase_ , '' )
lowerCAmelCase : Tuple = word.replace(' ' , UpperCAmelCase_ )
lowerCAmelCase : Dict = word
return word
def lowercase__ ( self : Optional[Any] , UpperCAmelCase_ : str ):
if self.bpe_ranks is None:
            raise ValueError(
                'This tokenizer was instantiated without a `merges.txt` file, so'
                ' that it can only be used for decoding, not for encoding. '
                'Make sure to provide a `merges.txt` file at instantiation to enable '
                'encoding.' )
if self.do_lower_case:
lowerCAmelCase : Optional[Any] = text.lower()
lowerCAmelCase : Optional[Any] = text.split()
lowerCAmelCase : Optional[int] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(UpperCAmelCase_ ).split(' ' ) ) )
return split_tokens
def lowercase__ ( self : List[str] , UpperCAmelCase_ : str ):
return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token ) )
def lowercase__ ( self : Any , UpperCAmelCase_ : int ):
lowerCAmelCase : Union[str, Any] = self.decoder.get(UpperCAmelCase_ , self.unk_token )
return result
def lowercase__ ( self : int , UpperCAmelCase_ : List[str] ):
lowerCAmelCase : List[Any] = ' '.join(UpperCAmelCase_ )
# make sure @@ tokens are concatenated
lowerCAmelCase : int = ''.join(string.split(UpperCAmelCase_ ) )
return string
def lowercase__ ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ):
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
lowerCAmelCase : int = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase : List[Any] = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(UpperCAmelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_ ) + '\n' )
lowerCAmelCase : Union[str, Any] = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(UpperCAmelCase_ , 'w' , encoding='utf-8' ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCAmelCase_ : UpperCAmelCase_[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
' Please check that the tokenizer is not corrupted!' )
lowerCAmelCase : List[str] = token_index
writer.write(' '.join(UpperCAmelCase_ ) + '\n' )
index += 1
return (vocab_file, merges_file)
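# Illustration (not in the original file) of the BPE_TOKEN_MERGES ('@@ ')
# convention undone by the token-joining method above: continuation markers
# are removed by splitting on '@@ ' and re-joining.
def _bpe_marker_demo():
    joined = ' '.join(['hel@@', 'lo', 'wor@@', 'ld'])  # 'hel@@ lo wor@@ ld'
    return ''.join(joined.split('@@ '))  # 'hello world'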
| 363
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : Union[str, Any] = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
__A : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
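# Hedged, minimal analogue (not HF's actual implementation) of the _LazyModule
# pattern used above: attribute access triggers the real import, so importing
# the package stays cheap and torch-only symbols are resolved only when used.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f'.{submodule}', self.__name__)
                return getattr(module, attr)
        raise AttributeError(f'module {self.__name__!r} has no attribute {attr!r}')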
| 323
| 0
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : List[Any] = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[str] = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
__A : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 364
|
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential: building the model layer by layer)
__A : Dict = models.Sequential()
# Step 1 - Convolution
    # Here 64, 64 are the height & width of the dataset images and 3 is for the RGB channels
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='''relu''')
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation='''relu'''))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
__A : List[Any] = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
__A : List[str] = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
__A : Any = train_datagen.flow_from_directory(
'''dataset/training_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
__A : Tuple = test_datagen.flow_from_directory(
'''dataset/test_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
    # `fit_generator` was removed in recent Keras; `fit` accepts generators directly.
    classifier.fit(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
__A : Any = tf.keras.preprocessing.image.load_img(
'''dataset/single_prediction/image.png''', target_size=(64, 64)
)
__A : List[str] = tf.keras.preprocessing.image.img_to_array(test_image)
__A : Optional[Any] = np.expand_dims(test_image, axis=0)
__A : int = classifier.predict(test_image)
# training_set.class_indices
    # The sigmoid head outputs a probability in (0, 1), so threshold it rather
    # than comparing for exact equality with 0 or 1.
    if result[0][0] < 0.5:
        __A : Optional[int] = '''Normal'''
    else:
        __A : str = '''Abnormality detected'''
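    # Hedged helper (not in the original script): tie the thresholded sigmoid
    # output back to the directory-derived class names; `class_indices` comes
    # from the flow_from_directory generator, e.g. {'normal': 0, 'abnormal': 1}
    # (the exact names depend on the dataset folders).
    def _label_from_sigmoid(result, class_indices):
        idx_to_class = {v: k for k, v in class_indices.items()}
        return idx_to_class[int(result[0][0] > 0.5)]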
| 323
| 0
|