| code (string, 86-54.5k chars) | code_codestyle (int64, 0-371) | style_context (string, 87-49.2k chars) | style_context_codestyle (int64, 0-349) | label (int64, 0-1) |
|---|---|---|---|---|
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]) -> int:
    """
    Recursively explores the matrix and tracks the largest square of 1s.

    >>> largest_square_area_in_matrix_top_down(2, 2, [[1, 1], [1, 1]])
    2
    """

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """
    Same recursion, memoized in dp_array.

    >>> largest_square_area_in_matrix_top_down_with_dp(2, 2, [[1, 1], [1, 1]])
    2
    """

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """
    Iterative bottom-up dynamic programming over a (rows+1) x (cols+1) table.

    >>> largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]])
    2
    """
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """
    Bottom-up DP keeping only the current and next rows.

    >>> largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, [[1, 1], [1, 1]])
    2
    """
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # Copy rather than alias: the row just filled becomes "next" for the row above.
        next_row = current_row.copy()
    return largest_square_area


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
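All four functions above compute the same quantity (the side length of the largest all-ones square), so a quick cross-check makes a useful smoke test. This snippet is illustrative and not part of the original file; the `sample` matrix is made up:

# Illustrative cross-check: every implementation should agree.
sample = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
assert (
    largest_square_area_in_matrix_top_down(3, 3, sample)
    == largest_square_area_in_matrix_top_down_with_dp(3, 3, sample)
    == largest_square_area_in_matrix_bottom_up(3, 3, sample)
    == largest_square_area_in_matrix_bottom_up_space_optimization(3, 3, sample)
    == 2
)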
| 99
|
def stooge_sort(arr: list) -> list:
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return

    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3

        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, (h - t))

        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, (h))

        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, (h - t))


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
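A quick illustration (not part of the original script) of the sort in action:

# Illustrative only: stooge sort works in place and returns the list.
print(stooge_sort([8, 3, 5, 1, 9]))  # [1, 3, 5, 8, 9]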
| 99
| 1
|
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
    import torch

    from transformers import (
        MraForMaskedLM,
        MraForMultipleChoice,
        MraForQuestionAnswering,
        MraForSequenceClassification,
        MraForTokenClassification,
        MraModel,
    )
    from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return


@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265

        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265

        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
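For reference, the checkpoint exercised by the integration tests can be used outside the test suite. This is a minimal sketch built only from calls that appear in the tests above:

import torch
from transformers import MraModel

model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
input_ids = torch.arange(256).unsqueeze(0)  # dummy batch: one 256-token sequence
with torch.no_grad():
    last_hidden_state = model(input_ids)[0]
print(last_hidden_state.shape)  # torch.Size([1, 256, 768])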
| 296
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
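As a usage sketch (assuming the standard transformers config API, which is not shown in this file), the config can be instantiated with its defaults:

from transformers import TableTransformerConfig

config = TableTransformerConfig()  # ResNet-50 timm backbone, 100 queries, d_model=256
print(config.num_queries, config.hidden_size)  # 100 256  (hidden_size maps to d_model)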
| 296
| 1
|
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
"""B""": [["""C""", 1]],
"""C""": [["""D""", 1]],
"""D""": [["""F""", 1]],
"""E""": [["""B""", 1], ["""G""", 2]],
"""F""": [],
"""G""": [["""F""", 1]],
}
graph_bwd = {
"""B""": [["""E""", 1]],
"""C""": [["""B""", 1]],
"""D""": [["""C""", 1]],
"""F""": [["""D""", 1], ["""G""", 1]],
"""E""": [[None, np.inf]],
"""G""": [["""E""", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
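An illustrative call with the graphs defined above (not part of the original file); the cheapest E-to-F route is E -> G (cost 2) plus G -> F (cost 1):

print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # 3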
| 55
|
'''simple docstring'''
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """
    Returns the prime factors of ``n`` in ascending order.

    >>> prime_factors(100)
    [2, 2, 5, 5]
    >>> prime_factors(37)
    [37]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 55
| 1
|
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 307
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
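The tests above imply a standard denoising loop; here is a hedged, minimal sketch of that loop outside the test harness. The zero tensor stands in for a real UNet's noise prediction, and the constructor values are the ones from get_scheduler_config:

import torch
from diffusers import DPMSolverSDEScheduler

scheduler = DPMSolverSDEScheduler(
    num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear", noise_sampler_seed=0
)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for a UNet forward pass
    sample = scheduler.step(noise_pred, t, sample).prev_sample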
| 307
| 1
|
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
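Once registered, this surfaces as the `download` subcommand of the Transformers CLI, e.g. `transformers-cli download --cache-dir /tmp/models bert-base-uncased` (model name and cache path illustrative).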
| 268
|
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise the PyTorch model from the json config
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
UpperCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--xlnet_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained XLNet model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--finetuning_task""",
default=None,
type=str,
help="""Name of a task on which the XLNet TensorFlow model was fine-tuned""",
)
UpperCAmelCase : int = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
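A typical invocation (script name and paths illustrative): `python convert_xlnet_original_tf_checkpoint_to_pytorch.py --tf_checkpoint_path ./xlnet/model.ckpt --xlnet_config_file ./xlnet/config.json --pytorch_dump_folder_path ./out --finetuning_task sts-b`.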
| 95
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A : str = {
"configuration_xlm_roberta_xl": [
"XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaXLConfig",
"XLMRobertaXLOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Union[str, Any] = [
"XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaXLForCausalLM",
"XLMRobertaXLForMaskedLM",
"XLMRobertaXLForMultipleChoice",
"XLMRobertaXLForQuestionAnswering",
"XLMRobertaXLForSequenceClassification",
"XLMRobertaXLForTokenClassification",
"XLMRobertaXLModel",
"XLMRobertaXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
A : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 354
|
from __future__ import annotations
A : Dict = "#"
class lowerCamelCase :
"""simple docstring"""
def __init__( self : Dict ) -> None:
SCREAMING_SNAKE_CASE_ = {}
def __A ( self : List[Any] , __magic_name__ : str ) -> None:
SCREAMING_SNAKE_CASE_ = self._trie
for char in text:
if char not in trie:
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = trie[char]
SCREAMING_SNAKE_CASE_ = True
def __A ( self : Union[str, Any] , __magic_name__ : str ) -> tuple | list:
SCREAMING_SNAKE_CASE_ = self._trie
for char in prefix:
if char in trie:
SCREAMING_SNAKE_CASE_ = trie[char]
else:
return []
return self._elements(__magic_name__ )
def __A ( self : int , __magic_name__ : dict ) -> tuple:
SCREAMING_SNAKE_CASE_ = []
for c, v in d.items():
SCREAMING_SNAKE_CASE_ = [" "] if c == END else [(c + s) for s in self._elements(__magic_name__ )]
result.extend(__magic_name__ )
return tuple(__magic_name__ )
A : Union[str, Any] = Trie()
A : Optional[int] = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
trie.insert_word(word)
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = trie.find_word(__UpperCamelCase )
return tuple(string + word for word in suffixes )
def a__ ( ):
print(autocomplete_using_trie("de" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
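A quick check of the expected output (illustrative; it relies on dict insertion order, which Python 3.7+ guarantees). Note the trailing space appended by `_elements` for each terminal:

assert autocomplete_using_trie("de") == ("depart ", "detergent ", "deer ", "deal ")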
| 305
| 0
|
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_CITATION = r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 43
|
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, references, predictions):
        n_correct = 0.0
        for i, j in zip(references, predictions):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(references)
        return {
            "accuracy": accuracy,
        }
| 1
| 0
|
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    """
    >>> make_matrix(1)
    [[1]]
    >>> make_matrix(-2)
    [[1, 2], [3, 4]]
    """
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate 90 degrees counterclockwise."""
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate 180 degrees."""
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate 270 degrees counterclockwise (90 clockwise)."""
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
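A compact check of the 90-degree rotation on a 2x2 matrix (illustrative, not in the original file):

# transpose [[1, 2], [3, 4]] -> [[1, 3], [2, 4]]; reverse the rows -> [[2, 4], [1, 3]]
print(rotate_90(make_matrix(2)))  # [[2, 4], [1, 3]]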
| 137
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/realm-cc-news-pretrained-embedder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-encoder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-scorer": (
        "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-openqa": (
        "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json"
    ),
    "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
    "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
    "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
    "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
    # See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
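A minimal instantiation sketch (defaults taken from the signature above; `RealmConfig` is importable from transformers):

from transformers import RealmConfig

config = RealmConfig()
print(config.model_type, config.vocab_size, config.num_candidates)  # realm 30522 8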
| 137
| 1
|
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    # Intentionally shadows the builtin; the decorator returns the elapsed time.
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    # Intentionally shadows the builtin; the decorator returns the elapsed time.
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
| 99
|
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'label_embs_concat': 'label_embeddings_concat',
'mask_emb': 'masked_spec_embed',
'spk_proj': 'speaker_proj',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'label_embeddings_concat',
'speaker_proj',
'layer_norm_for_extract',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak a fairseq UniSpeechSat checkpoint into the transformers design.
    """
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
snake_case__ : Tuple = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
snake_case__ : Any = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
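# Example invocation (illustrative sketch; the script name and all paths are placeholders):
# python convert_unispeech_sat_checkpoint.py \
#     --checkpoint_path /path/to/unispeech_sat.pt \
#     --pytorch_dump_folder_path ./unispeech-sat-hf \
#     --config_path ./config.json
# Add --not_finetuned to convert a pre-training (non-CTC) checkpoint.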
'''simple docstring'''
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
_DESCRIPTION = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
_KWARGS_DESCRIPTION = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None,
        )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
'''simple docstring'''
from math import sqrt
def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
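# Sanity check: 220 and 284 form the smallest amicable pair, since
# sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220, so both
# contribute to the total returned by solution().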
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A : Optional[int] = logging.get_logger(__name__)
_A : int = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class Data2VecTextConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    '''simple docstring'''

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
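# Usage sketch (illustrative; assumes the standard OnnxConfig(config) constructor):
#   config = Data2VecTextConfig()                 # BERT-base-like default sizes
#   onnx_config = Data2VecTextOnnxConfig(config)
#   print(onnx_config.inputs)                     # OrderedDict with dynamic batch/sequence axes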
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_A : Optional[int] = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
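# Note: this class adds no behaviour of its own; it is a thin, deprecated alias
# over SegformerImageProcessor that only emits the FutureWarning above.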
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    @slow
    def test_tokenizer_from_pretrained(self):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPT2Tokenizer, GPT2TokenizerFast))
            self.assertGreater(len(tokenizer), 0)
    def test_tokenizer_from_pretrained_identifier(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_model_type(self):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)

    def test_tokenizer_from_tokenizer_class(self):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)
    def test_tokenizer_from_type(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert", use_fast=False)
            self.assertIsInstance(tokenizer, BertTokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2", use_fast=False)
            self.assertIsInstance(tokenizer, GPT2Tokenizer)

    @require_tokenizers
    def test_tokenizer_from_type_fast(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert")
            self.assertIsInstance(tokenizer, BertTokenizerFast)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2")
            self.assertIsInstance(tokenizer, GPT2TokenizerFast)
    def test_tokenizer_from_type_incorrect_name(self):
        with pytest.raises(ValueError):
            AutoTokenizer.from_pretrained("./", tokenizer_type="xxx")

    @require_tokenizers
    def test_tokenizer_identifier_with_correct_config(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased")
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))

            if isinstance(tokenizer, BertTokenizer):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, False)
            else:
                self.assertEqual(tokenizer.do_lower_case, False)

            self.assertEqual(tokenizer.model_max_length, 512)
    @require_tokenizers
    def test_tokenizer_identifier_non_existent(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                EnvironmentError,
                "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier",
            ):
                _ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists")

    def test_model_name_edge_cases_in_mappings(self):
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []

        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__)

            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__)

        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(tokenizer_name)

    @require_tokenizers
    def test_from_pretrained_use_fast_toggle(self):
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False), BertTokenizer)
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased"), BertTokenizerFast)
    @require_tokenizers
    def test_do_lower_case(self):
        tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased", do_lower_case=False)
        sample = "Hello, world. How are you?"
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

        tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base", do_lower_case=False)
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

    @require_tokenizers
    def test_PreTrainedTokenizerFast_from_pretrained(self):
        tokenizer = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config")
        self.assertEqual(type(tokenizer), PreTrainedTokenizerFast)
        self.assertEqual(tokenizer.model_max_length, 512)
        self.assertEqual(tokenizer.vocab_size, 30000)
        self.assertEqual(tokenizer.unk_token, "[UNK]")
        self.assertEqual(tokenizer.padding_side, "right")
        self.assertEqual(tokenizer.truncation_side, "right")
    def test_auto_tokenizer_from_local_folder(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir)

        self.assertIsInstance(tokenizer2, tokenizer.__class__)
        self.assertEqual(tokenizer2.vocab_size, 12)

    def test_auto_tokenizer_fast_no_slow(self):
        tokenizer = AutoTokenizer.from_pretrained("ctrl")
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer, CTRLTokenizer)
    def test_get_tokenizer_config(self):
        # Check we can load the tokenizer config of an online model.
        config = get_tokenizer_config("bert-base-cased")
        _ = config.pop("_commit_hash", None)
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config, {"do_lower_case": False})

        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER)
        self.assertDictEqual(config, {})

        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            config = get_tokenizer_config(tmp_dir)

        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["tokenizer_class"], "BertTokenizer")
    def test_new_tokenizer_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, slow_tokenizer_class=BertTokenizer)

            tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    @require_tokenizers
    def test_new_tokenizer_fast_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            # Can register in two steps
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, None))
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=CustomTokenizerFast)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast
            )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, fast_tokenizer_class=BertTokenizerFast)

            # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new tokenizer
            # and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER)
                bert_tokenizer.save_pretrained(tmp_dir)
                tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizerFast)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, use_fast=False)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True)
        self.assertTrue(tokenizer.special_attribute_present)
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertTrue(reloaded_tokenizer.special_attribute_present)

        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)
                reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True, use_fast=False)
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(reloaded_tokenizer.special_attribute_present)
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
    @require_tokenizers
    def test_from_pretrained_dynamic_tokenizer_conflict(self):
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=NewTokenizerFast)
            # If remote code is not set, the default is to use local
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", use_fast=False)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local one.
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertTrue(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer_legacy_format(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True
        )
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoTokenizer.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_cached_tokenizer_has_minimum_calls_to_head(self):
        # Make sure we have cached the tokenizer.
        _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : List[str] = logging.get_logger(__name__)
A : List[Any] = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
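# Usage sketch (illustrative):
#   config = SegformerConfig()              # MiT-b0-style defaults as above
#   onnx_config = SegformerOnnxConfig(config)
#   onnx_config.inputs                      # pixel_values with dynamic batch/channel/height/width axes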
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip("""Does not support attention outputs""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""ESMFold does not support passing input embeds!""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""ESMFold does not output hidden states in the normal way.""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""ESMfold does not output hidden states in the normal way.""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""ESMFold only has one output format.""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""This test doesn't work for ESMFold and doesn't test core functionality""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""ESMFold does not support input chunking.""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""ESMFold doesn't support data parallel.""" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase__ ( self ):
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float64)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("""repo_id""" , ["""canonical_dataset_name""", """org-name/dataset-name"""] )
@pytest.mark.parametrize("""path""" , ["""filename.csv""", """filename with blanks.csv"""] )
@pytest.mark.parametrize("""revision""" , [None, """v2"""] )
def test_hf_hub_url(repo_id: str, path: str, revision: str):
    """simple docstring"""
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
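# Example of the URL shape the assert above checks (spaces are percent-encoded by quote):
#   hf_hub_url(repo_id="org-name/dataset-name", path="filename with blanks.csv", revision="v2")
#   -> "https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv"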
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    """simple docstring"""

    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)
    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)
    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--big_bird_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
)
__UpperCAmelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
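# Example invocation (illustrative sketch; the script name and all paths are placeholders):
# python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /path/to/bigbird/model.ckpt \
#     --big_bird_config_file /path/to/config.json \
#     --pytorch_dump_path ./bigbird-pytorch
# Add --is_trivia_qa when the checkpoint carries a TriviaQA question-answering head.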
'''simple docstring'''
def is_pentagonal(n: int) -> bool:
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
print(F"""{solution() = }""")
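# This is Project Euler problem 44: find the pair of pentagonal numbers whose sum
# and difference are both pentagonal and whose difference D is minimised.
# The published answer is D = 5482660.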
'''simple docstring'''
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
UpperCamelCase__ = datasets.logging.get_logger(__name__)
UpperCamelCase__ = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
UpperCamelCase__ = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
UpperCamelCase__ = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
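# A minimal illustration (editor's sketch with hypothetical two-line inputs, not part
# of the metric): a document counts as having gold parse annotation as soon as a
# non-comment line carries something other than "-" in its sixth column.
#
#     lines_with_parse = ["bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 - * (V*) -"]
#     lines_without_parse = ["bc/cctv/00/cctv_0005 0 0 Thank VBP - thank 01 1 - * (V*) -"]
#     assert check_gold_parse_annotation(lines_with_parse) is True
#     assert check_gold_parse_annotation(lines_without_parse) is False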
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
| 181
| 1
|
"""simple docstring"""
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
BERT_BASE_CASED = "bert-base-cased"
FP16 = "fp16"
BF16 = "bf16"
dtypes = [FP16, BF16]


@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()

        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true",
            MASTER_ADDR="localhost",
            MASTER_PORT="10999",
            RANK="0",
            LOCAL_RANK="0",
            WORLD_SIZE="1",
        )
    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            # env var names reconstructed from accelerate's FSDP plugin conventions
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))
    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))
    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)
    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy)
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)
    def test_mixed_precision(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)
    def test_cpu_offload(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160

        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])
    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                if strategy.lower() in config:
                    cmd_config.append(f"--fsdp_sharding_strategy={i + 1}")
                    break

            if "fp32" in config:
                cmd_config.append("--mixed_precision=no")
            else:
                cmd_config.append("--mixed_precision=fp16")

            if "cpu_offload" in config:
                cmd_config.append("--fsdp_offload_params=True")

            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                    break

            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--performance_lower_bound={self.performance_lower_bound}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
            cmd_config.append(f"--fsdp_sharding_strategy={i + 1}")
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}")
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"--output_dir={self.tmpdir}",
                        "--partial_train_epoch=1",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
                cmd_config.extend(
                    [
                        f"--resume_from_checkpoint={resume_from_checkpoint}",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"])
            else:
                cmd_config.extend(["--mixed_precision=no"])

            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"])
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                    if strategy.lower() in spec:
                        cmd_config.append(f"--fsdp_sharding_strategy={i + 1}")
                        break

                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True")

                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                        break

                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--peak_memory_upper_bound={peak_mem_upper_bound}",
                    f"--n_train={self.n_train}",
                    f"--n_val={self.n_val}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
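# For reference, a minimal sketch of what the mocked environments above emulate
# (editor's note; the variable names are taken from the tests themselves, anything
# beyond that is an assumption): FullyShardedDataParallelPlugin reads its
# configuration from environment variables, so setting them before instantiation
# is equivalent to passing `--use_fsdp --fsdp_offload_params=True` to
# `accelerate launch`.
#
#     import os
#     os.environ["ACCELERATE_USE_FSDP"] = "true"
#     os.environ["FSDP_OFFLOAD_PARAMS"] = "true"
#     plugin = FullyShardedDataParallelPlugin()  # picks up cpu_offload from the env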
| 271
|
"""simple docstring"""
def remove_duplicates(key: str) -> str:
    """Removes duplicate alphabetic characters from a keyword (spaces are kept)."""
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    """Returns a cipher map given a keyword."""
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    """Enciphers a message given a cipher map."""
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    """Deciphers a message given a cipher map by inverting the mapping."""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    """Handles I/O."""
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
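# Example session (editor's sketch, not part of the original module; the keyword
# "COLLEGE" is an arbitrary choice and the ciphertext depends on the generated map):
#
#     cipher_map = create_cipher_map("COLLEGE")
#     secret = encipher("MEET ME AT THE USUAL PLACE", cipher_map)
#     print(decipher(secret, cipher_map))  # restores "MEET ME AT THE USUAL PLACE"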
| 271
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
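# Usage note (editor's sketch): with this lazy structure, the heavy modeling file is
# only imported when one of its names is first resolved, e.g.:
#
#     from transformers.models.bigbird_pegasus import BigBirdPegasusConfig
#     config = BigBirdPegasusConfig()  # attribute resolution triggers the real import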
| 89
|
"""simple docstring"""
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    """Adjacency-list graph. Directed by default; pass directed=False for an undirected graph."""

    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> "GraphAdjacencyList[T]":
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
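# Example usage (editor's sketch, not part of the original module):
#
#     d_graph = GraphAdjacencyList()  # directed by default
#     d_graph.add_edge(0, 1).add_edge(1, 2)
#     print(d_graph)  # {0: [1], 1: [2], 2: []}
#
#     u_graph = GraphAdjacencyList(directed=False)
#     u_graph.add_edge("a", "b")
#     print(u_graph)  # {'a': ['b'], 'b': ['a']}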
| 220
| 0
|
import copy
import re
class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")
        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname
        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return
        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)
        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v, bool):
                v = 1 if v else 0
            sep = "" if isinstance(v, (int, float)) else "-"
            e = f"{key}{sep}{v}"
            name.append(e)
        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")

        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))
            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v

        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]

        return parameters
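# Example (editor's sketch; the exact short names depend on collision handling, so
# the output shown is indicative rather than guaranteed):
#
#     TrialShortNamer.set_defaults("hp", {"learning_rate": 0.1, "batch_size": 16})
#     TrialShortNamer.shortname({"learning_rate": 0.05, "batch_size": 16})
#     # -> something like "hp_lr0.05": only non-default params appear in the name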
| 367
|
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 288
| 0
|
"""simple docstring"""
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    """Binary search for the smallest index in (l, r] whose value is >= key."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    """Computes the LIS length in O(n log n) by maintaining the tails of candidate subsequences."""
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling value inside tail
            tail[ceil_index(v=tail, l=-1, r=length - 1, key=v[i])] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
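# Worked example (editor's sketch): for [2, 5, 3, 7, 11, 8, 10, 13, 6] one longest
# strictly increasing subsequence is [2, 3, 7, 8, 10, 13], so the length is 6.
#
#     assert longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6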
| 289
|
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
        T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 289
| 1
|
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 364
|
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
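# For reference (editor's sketch): the checkpoint regex extracts (name, link) pairs, e.g.
#
#     _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#     # -> [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]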
| 326
| 0
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 279
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 279
| 1
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    "A callback that registers the events that go through."

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
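# A minimal sketch of a user-defined callback like the ones exercised above
# (editor's example; `model` and `training_args` are hypothetical placeholders):
#
#     class PrintEpochCallback(TrainerCallback):
#         def on_epoch_end(self, args, state, control, **kwargs):
#             print(f"finished epoch {state.epoch}")
#
#     trainer = Trainer(model, training_args, callbacks=[PrintEpochCallback])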
| 233
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Tuple = {
'google/mobilenet_v1_1.0_224': 'https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json',
'google/mobilenet_v1_0.75_192': 'https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
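# Example (editor's sketch): instantiating a narrower variant via the config.
#
#     config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#     config.min_depth  # 8, the floor applied when scaling channel counts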
| 233
| 1
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    r"""
    Constructs a LayoutXLM processor, which combines a LayoutLMv2 image processor and a LayoutXLM tokenizer into a
    single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
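
A minimal usage sketch for the processor above (editor's addition, not part of the original file); the checkpoint name and image path are illustrative assumptions:

from PIL import Image
from transformers import LayoutXLMProcessor

processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")  # assumed checkpoint
image = Image.open("document.png").convert("RGB")  # any scanned document image
# with apply_ocr=True (the image processor default), words and boxes come from the built-in OCR
encoding = processor(image, return_tensors="pt")
print(encoding.keys())  # includes input_ids, bbox, attention_mask and image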
| 90
|
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number (a positive number whose only prime factors are 2, 3 and 5)."""
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"{ugly_numbers(2_00) = }")
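
As a quick sanity check (editor's addition): merging the three multiple streams above reproduces the start of the ugly-number sequence.

print([ugly_numbers(i) for i in range(1, 11)])  # [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]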
| 207
| 0
|
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
if __name__ == "__main__":
main()
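
For context (editor's addition): this script expects to be launched once per process by a distributed launcher so that RANK and WORLD_SIZE are set; the flag values below are illustrative assumptions.

# e.g. two processes on a single machine, streaming and non-streaming:
#   torchrun --nproc_per_node=2 distributed_script.py --streaming True
#   torchrun --nproc_per_node=2 distributed_script.py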
| 364
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(self, image_size=224, num_channels=3, kernel_size=3, stride=2, padding=1, patch_size=16, hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2], initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
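
A short sketch of typical usage (editor's addition); instantiating with defaults reproduces the stage widths configured above.

config = LevitConfig()
print(config.hidden_sizes)    # [128, 256, 384]
print(config.down_ops[0][0])  # 'Subsample'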
| 344
| 0
|
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name` inside the diffusers repo."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")


def get_indent(code):
    """Return the indentation of the first non-empty line of `code`."""
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Apply the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences or overwrite the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    """Check every file in the repo and raise (or fix, with `overwrite=True`) drifted `# Copied from` blocks."""
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
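
Typical invocations (editor's addition), inferred from the argument parser above:

# report inconsistencies and exit with an error:
#   python utils/check_copies.py
# rewrite out-of-sync copies in place:
#   python utils/check_copies.py --fix_and_overwrite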
| 227
|
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    """Split a sklearn Bunch into its feature matrix and target vector."""
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    """Fit an XGBoost classifier on the given features and target."""
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    data = load_iris()
    features, targets = data_handling(data)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = data["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap="Blues", normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 227
| 1
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
    model_type = "wavlm"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, num_buckets=320, max_bucket_distance=800, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3, output_hidden_size=None, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
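
A small sketch (editor's addition) of the derived property above: with the default conv_stride of (5, 2, 2, 2, 2, 2, 2), the feature encoder downsamples the raw waveform by 5 * 2**6 = 320 samples per output frame.

config = WavLMConfig()
print(config.inputs_to_logits_ratio)  # 320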
| 301
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass  # RoCBert has no fast tokenizer to import
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 301
| 1
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Stub standing in for PIL.Image when vision dependencies are unavailable."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            # the tiny model produces near-identical scores, so the label order is not guaranteed
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            # the tiny model produces near-identical scores, so the label order is not guaranteed
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
| 71
|
from torch import nn
class ClassificationHead(nn.Module):
    """Classification head (a single linear layer) on top of a fixed-size embedding."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
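
A minimal usage sketch (editor's addition); the sizes are arbitrary.

import torch

head = ClassificationHead(class_size=5, embed_size=768)
logits = head(torch.randn(2, 768))
print(logits.shape)  # torch.Size([2, 5])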
| 71
| 1
|
'''simple docstring'''
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Find the unique prime factors of an integer."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of unique prime factors of `num`."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check if all items of an iterable are equal."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first group of `n` consecutive integers that each have exactly `n` unique prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first member of the first group of `n` consecutive integers with `n` distinct prime factors."""
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
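
A quick check (editor's addition): with n=2 the search returns [14, 15], the first pair of consecutive integers with two distinct prime factors each (14 = 2 * 7, 15 = 3 * 5).

print(run(2))  # [14, 15]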
| 360
|
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    """Return `x` unchanged if it is iterable, otherwise duplicate it into a pair."""
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass
    def check_model_from_pretrained_configs(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = TFVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))
    def check_vision_text_dual_encoder_model(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))
    def check_vision_text_dual_encoder_from_pretrained(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))
    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)
    def check_vision_text_output_attention(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
    def test_vision_text_dual_encoder_model(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)
    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
        # just reinitialize it.
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def check_vision_text_output_attention(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
@slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
| 337
| 0
|
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data):
    # Split dataset into features and target
    return (data["data"], data["target"])


def xgboost(features, target, test_features):
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main():
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 61
|
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
UpperCamelCase__ = True
except (ImportError, ModuleNotFoundError):
UpperCamelCase__ = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split `x` into sentences with nltk and rejoin them separated by newlines."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 181
| 0
|
from math import factorial
def combinations(n: int, k: int) -> int:
    """Return the number of ways to choose `k` items from `n` items (n choose k)."""
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
'''The number of five-card hands possible from a standard''',
f'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
'''If a class of 40 students must be arranged into groups of''',
f'''4 for group projects, there are {combinations(40, 4)} ways''',
'''to arrange them.\n''',
)
print(
'''If 10 teams are competing in a Formula One race, there''',
f'''are {combinations(10, 3)} ways that first, second and''',
'''third place can be awarded.''',
)
| 363
|
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Choose a random pivot element from the list."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the k-th smallest element of `lst` (1-indexed) using quickselect."""
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
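
A quick demonstration (editor's addition) of the quickselect routine above. Note that elements equal to the pivot are discarded by the partition, so the input should contain distinct values.

print(kth_number([2, 1, 3, 4, 5], 3))  # 3, the third smallest element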
| 244
| 0
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 287
|
def binary_xor(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
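
A quick example (editor's addition): 25 is 0b11001 and 32 is 0b100000, so their bitwise XOR is:

print(binary_xor(25, 32))  # 0b111001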
| 121
| 0
|
'''simple docstring'''
def least_divisible_repunit(divisor: int) -> int:
    """Return the smallest k such that the repunit R(k) = 111...1 (k ones) is divisible by `divisor`."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    """Return the least odd divisor (coprime to 10) whose repunit divisibility index exceeds `limit`."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(F"""{solution() = }""")
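
A small check (editor's addition): 111111 = 7 * 15873 is the first repunit divisible by 7, so the helper returns 6.

print(least_divisible_repunit(7))  # 6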
| 363
|
'''simple docstring'''
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=2, seq_length=56, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=7, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, attention_type="block_sparse", use_bias=True, rescale_embeddings=False, block_size=2, num_random_blocks=3):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, attention_type=self.attention_type, block_size=self.block_size, num_random_blocks=self.num_random_blocks, use_bias=self.use_bias, rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # the block-sparse Flax attention returns `attention_probs = None`, so skip comparing attentions
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
| 246
| 0
|
'''simple docstring'''
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple, None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk
def prepare_input(dirty: str) -> str:
    """Upper-case the input, drop non-letters, and separate repeated letters with X's."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]

        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean
def __snake_case ( UpperCAmelCase_ : str ):
# I and J are used interchangeably to allow
# us to use a 5x5 table (25 letters)
lowerCamelCase_ = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
lowerCamelCase_ = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(UpperCAmelCase_ )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(UpperCAmelCase_ )
return table
def __snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : str ):
lowerCamelCase_ = generate_table(UpperCAmelCase_ )
lowerCamelCase_ = prepare_input(UpperCAmelCase_ )
lowerCamelCase_ = ""
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(UpperCAmelCase_ , 2 ):
lowerCamelCase_ ,lowerCamelCase_ = divmod(table.index(UpperCAmelCase_ ) , 5 )
lowerCamelCase_ ,lowerCamelCase_ = divmod(table.index(UpperCAmelCase_ ) , 5 )
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def __snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : str ):
lowerCamelCase_ = generate_table(UpperCAmelCase_ )
lowerCamelCase_ = ""
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(UpperCAmelCase_ , 2 ):
lowerCamelCase_ ,lowerCamelCase_ = divmod(table.index(UpperCAmelCase_ ) , 5 )
lowerCamelCase_ ,lowerCamelCase_ = divmod(table.index(UpperCAmelCase_ ) , 5 )
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext
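
# A quick usage sketch (not in the original module; the key and message are
# illustrative). encode/decode round-trip back to the prepared text, including
# the X that prepare_input appends to pad the message to an even length.
if __name__ == "__main__":
    KEY = "monarchy"
    secret = encode("Hide the gold", KEY)
    print(secret)
    print(decode(secret, KEY))  # -> "HIDETHEGOLDX"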
| 55
|
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    "Generates a tuple of dummy DataLoaders to test with"

    def get_dataset(n_batches):
        "Generates a dataset to test with"
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    "Trains for `num_epochs`"
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    "Simple model to do y=mx+b"

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()

            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)
    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)

            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)
    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())
    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 3 states:
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))
@require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
__snake_case = """/tmp/accelerate/state_checkpointing"""
__snake_case = DummyModel()
__snake_case = torch.optim.Adam(params=model.parameters(), lr=1E-3)
__snake_case = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9)
__snake_case , __snake_case = dummy_dataloaders()
__snake_case = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
__snake_case = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="""no""")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""cpu""")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
assert (
param_device.type == torch.device("""cpu""").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""on_device""")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="""Unsupported optimizer map location passed"""):
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""invalid""")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
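
    # Summary of the save/load pattern exercised above (a sketch; the paths
    # assume the automatic checkpoint naming configured in `project_config`):
    #   accelerator.save_state()   # writes <savedir>/checkpoints/checkpoint_<i>
    #   accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"),
    #                          map_location="cpu")  # or "on_device"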
| 259
| 0
|
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
__UpperCamelCase = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
__UpperCamelCase = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (`'warn'`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
__UpperCamelCase = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Recall(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32')),
'references': datasets.Sequence(datasets.Value('int32')),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32'),
'references': datasets.Value('int32'),
}), reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'], )
    def _compute(
        self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None, zero_division="warn",
    ):
        score = recall_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight, zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
| 351
|
"""simple docstring"""
from math import pi
def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
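    # Worked check (not in the original): a 90 degree arc of a circle with
    # radius 10 is a quarter of the circumference, 2*pi*10/4 = 5*pi ~ 15.7080,
    # which is what the call above prints.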
| 312
| 0
|
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    """
    Constructs a BLIP processor which wraps a BERT tokenizer and a BLIP image processor into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
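

# Usage sketch (the checkpoint name below is illustrative; any BLIP checkpoint
# that ships a BlipImageProcessor + BERT tokenizer pair should work):
#   from transformers import BlipProcessor
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=pil_image, text="a photo of", return_tensors="pt")
#   # -> BatchEncoding with "pixel_values", "input_ids" and "attention_mask"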
| 215
|
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
@require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 180
| 0
|
"""simple docstring"""
import requests
_a = """YOUR API KEY"""
def __a ( __lowerCamelCase, __lowerCamelCase = giphy_api_key ):
UpperCAmelCase_ : Optional[Any] = '+'.join(query.split() )
UpperCAmelCase_ : Any = f"""https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"""
UpperCAmelCase_ : Dict = requests.get(_A ).json()['data']
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('\n'.join(get_gifs('space ship')))
| 352
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_unispeech'] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 23
| 0
|
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."


# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    # Keep consuming lines while they belong to the current block: still indented,
    # empty, or a closing parenthesis of a signature.
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    """Apply the black rule in `setup.cfg` to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences or overwrite the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
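
    # Typical invocations, run from the repo root (mirrors the hint in the
    # exception message raised by check_copies above):
    #   python utils/check_copies.py                      # only report inconsistencies
    #   python utils/check_copies.py --fix_and_overwrite  # rewrite the drifted copies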
| 197
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : List[str] =logging.get_logger(__name__)
__lowerCAmelCase : str ={
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
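

# Quick sanity sketch (not in the original file): the defaults above mirror
# BERT-base, and projection_dim=0 means the encoders keep the full hidden size.
#   config = DPRConfig(projection_dim=128)
#   assert config.hidden_size == 768 and config.projection_dim == 128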
| 197
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 73
|
"""simple docstring"""
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def lowerCAmelCase_( lowercase_ : List[str]="" ) -> str:
_lowerCamelCase = tempfile.mkdtemp()
return os.path.join(lowercase_ , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)

        agent_type = AgentAudio(path)

        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)

        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
| 73
| 1
|
'''simple docstring'''
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """Return the two roots of ax^2 + bx + c = 0, real or complex."""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution1, solution2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution1} and {solution2}")


if __name__ == "__main__":
    main()
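
    # Worked example: for x^2 + 1 = 0 the discriminant is -4 and sqrt(-4) = 2j,
    # so quadratic_roots(1, 0, 1) returns the complex pair (1j, -1j). For the
    # call in main() the discriminant is 36 - 20 = 16, giving real roots
    # -0.2 and -1.0.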
| 3
|
'''simple docstring'''
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )

        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["aatype"].device,
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["aatype"].device,
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["aatype"].device,
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
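

# Shape sketch (assuming protein["aatype"] is a [num_res] tensor of residue
# type indices), summarizing the keys added by make_atom14_masks above:
#   protein["residx_atom14_to_atom37"] -> [num_res, 14]  atom37 index per atom14 slot
#   protein["residx_atom37_to_atom14"] -> [num_res, 37]  atom14 index per atom37 slot
#   protein["atom14_atom_exists"]      -> [num_res, 14]  1.0 where the slot is a real atom
#   protein["atom37_atom_exists"]      -> [num_res, 37]  1.0 where the residue has that atom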
| 344
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
'''IBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''IBertForMaskedLM''',
'''IBertForMultipleChoice''',
'''IBertForQuestionAnswering''',
'''IBertForSequenceClassification''',
'''IBertForTokenClassification''',
'''IBertModel''',
'''IBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 34
|
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")
class Tee:
    """A helper class to tee print's output into a file.

    Usage: sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """
    Return the original command line string that can be replayed nicely, wrapped for the given width.

    Args:
        max_width (`int`, *optional*, defaults to 80): the width to wrap for.
        full_python_path (`bool`, *optional*, defaults to `False`):
            whether to replicate the full path or just the last segment (i.e. `python`).
    """

    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # enable the `if 0` branch below to debug this program quickly w/o running the actual training
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id, cmd, variation, variation_key, longest_variation_len, target_metric_key, report_metric_keys, repeat_times, output_dir, verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])

    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f'''
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
'''
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation,
                variation_key,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)


if __name__ == "__main__":
    main()
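
# Illustrative standalone sketch (hypothetical inputs): how two --variations
# dimensions expand into the cartesian product of command-line suffixes that
# main() iterates over.
def _demo_variation_expansion():
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in ["--tf32 0|--tf32 1", "|--fp16|--bf16"]]
    return list(map(str.strip, map(" ".join, itertools.product(*dims))))
    # -> ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
    #     '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']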
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    "A callback that registers the events that go through."

    def __init__(self):
        self.events = []
    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )
    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(cb)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
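
# Illustrative sketch (hypothetical model/dataset): wiring the recording callback
# above into a real Trainer outside of the test harness.
def build_recording_trainer(model, train_dataset):
    args = TrainingArguments(output_dir="out", num_train_epochs=1, report_to=[])
    return Trainer(model, args, train_dataset=train_dataset, callbacks=[MyTestTrainerCallback])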
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[float]`): Predicted labels, as returned by a model.\n    references (`List[float]`): Ground truth labels.\n    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n        only the spearmanr score. Defaults to `False`.\nReturns:\n    spearmanr (`float`): Spearman correlation coefficient.\n    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n    Example 1:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n        >>> print(results)\n        {\'spearmanr\': -0.7}\n\n    Example 2:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n        ...                                    predictions=[10, 9, 2.5, 6, 4],\n        ...                                    return_pvalue=True)\n        >>> print(results[\'spearmanr\'])\n        -0.7\n        >>> print(round(results[\'spearmanr_pvalue\'], 2))\n        0.19\n'
_CITATION = r'\\n@book{kokoska2000crc,\n  title={CRC standard probability and statistics tables and formulae},\n  author={Kokoska, Stephen and Zwillinger, Daniel},\n  year={2000},\n  publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n  author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n             Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n             Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n             Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n             Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n             Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n             Kern, Robert and Larson, Eric and Carey, C J and\n             Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n             {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n             Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n             Harris, Charles R. and Archibald, Anne M. and\n             Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n             {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n  title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n             Computing in Python}},\n  journal = {Nature Methods},\n  year    = {2020},\n  volume  = {17},\n  pages   = {261--272},\n  adsurl  = {https://rdcu.be/b08Wh},\n  doi     = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] , )
    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '\\n@article{scikit-learn,\n  title={Scikit-learn: Machine Learning in {P}ython},\n  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n  journal={Journal of Machine Learning Research},\n  volume={12},\n  pages={2825--2830},\n  year={2011}\n}\n'
_DESCRIPTION = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n        Estimated target values.\n    references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n        Ground truth (correct) target values.\n    sample_weight: array-like of shape (n_samples,), default=None\n        Sample weights.\n    multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n        Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n        "raw_values" : Returns a full set of errors in case of multioutput input.\n\n        "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n    squared : bool, default=True\n        If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n    mse : mean squared error.\nExamples:\n\n    >>> mse_metric = datasets.load_metric("mse")\n    >>> predictions = [2.5, 0.0, 2, 8]\n    >>> references = [3, -0.5, 2, 7]\n    >>> results = mse_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'mse\': 0.375}\n    >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n    >>> print(rmse_result)\n    {\'mse\': 0.6123724356957945}\n\n    If you\'re using multi-dimensional lists, then set the config as follows :\n\n    >>> mse_metric = datasets.load_metric("mse", "multilist")\n    >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n    >>> references = [[0, 2], [-1, 2], [8, -5]]\n    >>> results = mse_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'mse\': 0.7083333333333334}\n    >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n    >>> print(results)  # doctest: +NORMALIZE_WHITESPACE\n    {\'mse\': array([0.41666667, 1.        ])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
            ],
        )
    def _get_feature_types(self):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('''float''' ) ),
"references": datasets.Sequence(datasets.Value('''float''' ) ),
}
else:
return {
"predictions": datasets.Value('''float''' ),
"references": datasets.Value('''float''' ),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )

        return {"mse": mse}
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
SAMPLE_PROCESSOR_CONFIG = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
SAMPLE_VOCAB = get_tests_dir('fixtures/vocab.json')
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir('fixtures')
class AutoProcessorTest(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
def __a ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Union[str, Any] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(_A , _A )
def __a ( self : Any ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : str = WavaVecaConfig()
lowercase : Tuple = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(_A )
processor.save_pretrained(_A )
lowercase : Optional[Any] = AutoProcessor.from_pretrained(_A )
self.assertIsInstance(_A , _A )
def __a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(_A , os.path.join(_A , _A ) )
copyfile(_A , os.path.join(_A , '''vocab.json''' ) )
lowercase : Any = AutoProcessor.from_pretrained(_A )
self.assertIsInstance(_A , _A )
def __a ( self : Tuple ) -> Any:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : List[Any] = WavaVecaFeatureExtractor()
lowercase : Optional[int] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
lowercase : Union[str, Any] = WavaVecaProcessor(_A , _A )
# save in new folder
processor.save_pretrained(_A )
# drop `processor_class` in tokenizer
with open(os.path.join(_A , _A ) , '''r''' ) as f:
lowercase : Union[str, Any] = json.load(_A )
config_dict.pop('''processor_class''' )
with open(os.path.join(_A , _A ) , '''w''' ) as f:
f.write(json.dumps(_A ) )
lowercase : int = AutoProcessor.from_pretrained(_A )
self.assertIsInstance(_A , _A )
def __a ( self : str ) -> List[str]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : int = WavaVecaFeatureExtractor()
lowercase : Dict = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
lowercase : Optional[int] = WavaVecaProcessor(_A , _A )
# save in new folder
processor.save_pretrained(_A )
# drop `processor_class` in feature extractor
with open(os.path.join(_A , _A ) , '''r''' ) as f:
lowercase : int = json.load(_A )
config_dict.pop('''processor_class''' )
with open(os.path.join(_A , _A ) , '''w''' ) as f:
f.write(json.dumps(_A ) )
lowercase : Any = AutoProcessor.from_pretrained(_A )
self.assertIsInstance(_A , _A )
def __a ( self : List[Any] ) -> Tuple:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : Dict = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(_A )
# copy relevant files
copyfile(_A , os.path.join(_A , '''vocab.json''' ) )
# create emtpy sample processor
with open(os.path.join(_A , _A ) , '''w''' ) as f:
f.write('''{}''' )
lowercase : Tuple = AutoProcessor.from_pretrained(_A )
self.assertIsInstance(_A , _A )
def __a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
with self.assertRaises(_A ):
lowercase : Optional[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_A ):
lowercase : List[str] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_A )
lowercase : str = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_A )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
lowercase : Union[str, Any] = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
lowercase : Optional[int] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
lowercase : Any = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_A , use_fast=_A )
lowercase : Any = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def __a ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
try:
AutoConfig.register('''custom''' , _A )
AutoFeatureExtractor.register(_A , _A )
AutoTokenizer.register(_A , slow_tokenizer_class=_A )
AutoProcessor.register(_A , _A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_A ):
AutoProcessor.register(_A , _A )
# Now that the config is registered, it can be used as any other config with the auto-API
lowercase : Optional[Any] = CustomFeatureExtractor.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase : List[Any] = os.path.join(_A , '''vocab.txt''' )
with open(_A , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
lowercase : Optional[int] = CustomTokenizer(_A )
lowercase : Union[str, Any] = CustomProcessor(_A , _A )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(_A )
lowercase : Dict = AutoProcessor.from_pretrained(_A )
self.assertIsInstance(_A , _A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __a ( self : str ) -> Any:
"""simple docstring"""
        class NewFeatureExtractor(CustomFeatureExtractor):
            special_attribute_present = False

        class NewTokenizer(CustomTokenizer):
            special_attribute_present = False

        class NewProcessor(CustomProcessor):
            feature_extractor_class = '''AutoFeatureExtractor'''
            tokenizer_class = '''AutoTokenizer'''
            special_attribute_present = False
try:
AutoConfig.register('''custom''' , _A )
AutoFeatureExtractor.register(_A , _A )
AutoTokenizer.register(_A , slow_tokenizer_class=_A )
AutoProcessor.register(_A , _A )
# If remote code is not set, the default is to use local classes.
lowercase : List[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
lowercase : Any = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_A )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
lowercase : Union[str, Any] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_A )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __a ( self : Tuple ) -> Dict:
"""simple docstring"""
lowercase : Optional[int] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def __a ( self : Any ) -> int:
"""simple docstring"""
lowercase : Any = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def __a ( cls : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase : str = TOKEN
HfFolder.save_token(_A )
@classmethod
def __a ( cls : List[Any] ) -> Dict:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def __a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase : Any = WavaVecaProcessor.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(_A , '''test-processor''' ) , push_to_hub=_A , use_auth_token=self._token )
lowercase : Any = WavaVecaProcessor.from_pretrained(f"""{USER}/test-processor""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(_A , getattr(new_processor.feature_extractor , _A ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __a ( self : Optional[Any] ) -> str:
"""simple docstring"""
lowercase : int = WavaVecaProcessor.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(_A , '''test-processor-org''' ) , push_to_hub=_A , use_auth_token=self._token , organization='''valid_org''' , )
lowercase : Dict = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(_A , getattr(new_processor.feature_extractor , _A ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __a ( self : str ) -> Dict:
"""simple docstring"""
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
lowercase : List[str] = CustomFeatureExtractor.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase : Optional[int] = os.path.join(_A , '''vocab.txt''' )
with open(_A , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
lowercase : Optional[Any] = CustomTokenizer(_A )
lowercase : Optional[Any] = CustomProcessor(_A , _A )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f"""{USER}/test-dynamic-processor""" , token=self._token )
lowercase : List[str] = Repository(_A , clone_from=f"""{USER}/test-dynamic-processor""" , token=self._token )
processor.save_pretrained(_A )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(_A , '''tokenizer_config.json''' ) ) as f:
lowercase : Optional[Any] = json.load(_A )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(_A , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(_A , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(_A , '''custom_processing.py''' ) ) )
repo.push_to_hub()
lowercase : Tuple = AutoProcessor.from_pretrained(f"""{USER}/test-dynamic-processor""" , trust_remote_code=_A )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)

        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                'Potential NSFW content was detected in one or more images. A black image will be returned instead.'
                ' Try again with a different prompt and/or seed.'
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                'Potential watermarked content was detected in one or more images. A black image will be returned instead.'
                ' Try again with a different prompt and/or seed.'
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
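
# Hypothetical usage sketch: `checker` would be a safety checker instance loaded
# from a pipeline checkpoint; the inputs/shapes below are illustrative assumptions.
def run_safety_check(checker, clip_input, images):
    # clip_input: pixel values already preprocessed for the CLIP vision tower
    # images: the generated images as a numpy array, blacked out in place if flagged
    return checker(clip_input, images, p_threshold=0.5, w_threshold=0.5)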
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
@register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim, time_embed_dim=time_embed_dim, act_fn=act_fn, out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type, num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type, in_channels=block_out_channels[-1], mid_channels=block_out_channels[-1], out_channels=block_out_channels[-1], embed_dim=block_out_channels[0], num_layers=layers_per_block, add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type, num_layers=layers_per_block, in_channels=prev_output_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type, num_groups_out=num_groups_out, embed_dim=block_out_channels[0], out_channels=out_channels, act_fn=act_fn, fc_dim=block_out_channels[-1] // 4,
        )
    def forward(self, sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], return_dict: bool = True) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
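
# Minimal forward-pass sketch under assumed hyperparameters; the channel counts
# and sample length below are illustrative, not defaults of a released checkpoint.
if __name__ == "__main__":
    model = UNet1DModel(sample_size=256, in_channels=2, out_channels=2)
    noisy_sample = torch.randn(1, 2, 256)  # (batch, channels, length)
    output = model(noisy_sample, timestep=10).sample
    print(output.shape)  # same (1, 2, 256) layout as the input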
import qiskit
def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    classical_bits = qubits

    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend('''aer_simulator''')

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))

    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(f"Total count for various states are: {quantum_entanglement(3)}")
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values']

    def __init__(self, do_resize=True, size_divisor=32, resample=PILImageResampling.BILINEAR, do_rescale=True, **kwargs):
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image, size_divisor, resample, data_format=None, **kwargs):
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(self, images, do_resize=None, do_rescale=None, size_divisor=None, resample=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError('''size_divisor is required for resizing''')

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError('''Invalid image(s)''')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {'''pixel_values''': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
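
# Illustrative usage (hypothetical image): dimensions are floored to the nearest
# multiple of size_divisor, so a 65x73 input comes out as 64x64, channels first.
if __name__ == "__main__":
    processor = GLPNImageProcessor(size_divisor=32)
    image = np.random.randint(0, 256, (65, 73, 3), dtype=np.uint8)
    batch = processor.preprocess(image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 64, 64)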
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class __magic_name__ ( lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ''
lowerCamelCase__ : str = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self, lowercase_ = None, lowercase_ = None, **lowercase_, ) -> Dict:
"""simple docstring"""
super().__init__(self, **lowercase_ )
a__ =repo_info
a__ =token
a__ =None
    def _get_dirs( self ):
        """simple docstring"""
        if self.dir_cache is None:
            self.dir_cache ={}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] ={
                    '''name''': hf_file.rfilename,
                    '''size''': None,
                    '''type''': '''file''',
                }
                self.dir_cache.update(
                    {
                        str(d ): {'''name''': str(d ), '''size''': None, '''type''': '''directory'''}
                        for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
                    } )
    def _open( self, path, mode = "rb", **kwargs, ):
        """simple docstring"""
        if not isinstance(self.repo_info, DatasetInfo ):
            raise NotImplementedError(F"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
        url =hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha )
        return fsspec.open(
            url, mode=mode, headers=get_authentication_headers_for_url(url, use_auth_token=self.token ), client_kwargs={'''trust_env''': True}, ).open()
    def info( self, path, **kwargs ):
        """simple docstring"""
        self._get_dirs()
        path =self._strip_protocol(path )
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path )
    def ls( self, path, detail=False, **kwargs ):
        """simple docstring"""
        self._get_dirs()
        path =PurePosixPath(path.strip('''/''' ) )
        paths ={}
        for p, f in self.dir_cache.items():
            p =PurePosixPath(p.strip('''/''' ) )
            root =p.parent
            if root == path:
                paths[str(p )] =f
        out =list(paths.values() )
        if detail:
            return out
        else:
            return sorted(f['''name'''] for f in out )
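# --- Usage sketch (editorial addition): the filesystem wraps a hub DatasetInfo;
# --- "user/dataset" is a hypothetical repository id and the call needs network access.
if __name__ == "__main__":
    from huggingface_hub import HfApi
    fs = __magic_name__(repo_info=HfApi().dataset_info("user/dataset" ) )
    print(fs.ls("" ) )  # top-level file names, derived from repo_info.siblings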
| 188
|
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
'''tab''': ord('''\t'''),
'''newline''': ord('''\r'''),
'''esc''': 27,
'''up''': 65 + ARROW_KEY_FLAG,
'''down''': 66 + ARROW_KEY_FLAG,
'''right''': 67 + ARROW_KEY_FLAG,
'''left''': 68 + ARROW_KEY_FLAG,
'''mod_int''': 91,
'''undefined''': sys.maxsize,
'''interrupt''': 3,
'''insert''': 50,
'''delete''': 51,
'''pg_up''': 53,
'''pg_down''': 54,
}
KEYMAP['''arrow_begin'''] = KEYMAP['''up''']
KEYMAP['''arrow_end'''] = KEYMAP['''left''']
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
b'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
b'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
b'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
b'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
b'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
b'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
b'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
b'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    '''simple docstring'''
    if os.name == "nt":
        import msvcrt
        encoding ='''mbcs'''
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER ) == 0:
            # Read the keystroke
            ch =msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha =ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx =chr(WIN_KEYMAP[cha] )
                    WIN_CH_BUFFER.append(chr(KEYMAP['''mod_int'''] ) )
                    WIN_CH_BUFFER.append(chx )
                    if ord(chx ) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(1_26 ) )
                    ch =chr(KEYMAP['''esc'''] )
                except KeyError:
                    ch =cha[1]
            else:
                ch =ch.decode(encoding )
        else:
            ch =WIN_CH_BUFFER.pop(0 )
    elif os.name == "posix":
        import termios
        import tty
        fd =sys.stdin.fileno()
        old_settings =termios.tcgetattr(fd )
        try:
            tty.setraw(fd )
            ch =sys.stdin.read(1 )
        finally:
            termios.tcsetattr(fd , termios.TCSADRAIN , old_settings )
    return ch
def get_character():
    '''simple docstring'''
    char =get_raw_chars()
    if ord(char ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char ) == KEYMAP["esc"]:
        combo =get_raw_chars()
        if ord(combo ) == KEYMAP["mod_int"]:
            key =get_raw_chars()
            if ord(key ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key ) + ARROW_KEY_FLAG )
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
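# --- Usage sketch (editorial addition): echoes keys until Enter or Ctrl-C is pressed.
if __name__ == "__main__":
    print("Press keys (Enter or Ctrl-C to stop):" )
    while True:
        key = get_character()
        if key in (chr(KEYMAP["interrupt"] ), chr(KEYMAP["newline"] )):
            break
        print(repr(key ) )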
| 188
| 1
|
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key( name ):
    if "cls_token" in name:
        name = name.replace("cls_token" , "vit.embeddings.cls_token" )
    if "mask_token" in name:
        name = name.replace("mask_token" , "decoder.mask_token" )
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed" , "decoder.decoder_pos_embed" )
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed" , "vit.embeddings.position_embeddings" )
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "vit.embeddings.patch_embeddings.projection" )
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm" , "vit.embeddings.norm" )
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks" , "decoder.decoder_layers" )
    if "blocks" in name:
        name = name.replace("blocks" , "vit.encoder.layer" )
    if "attn.proj" in name:
        name = name.replace("attn.proj" , "attention.output.dense" )
    if "attn" in name:
        name = name.replace("attn" , "attention.self" )
    if "norm1" in name:
        name = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        name = name.replace("norm2" , "layernorm_after" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" , "output.dense" )
    if "decoder_embed" in name:
        name = name.replace("decoder_embed" , "decoder.decoder_embed" )
    if "decoder_norm" in name:
        name = name.replace("decoder_norm" , "decoder.decoder_norm" )
    if "decoder_pred" in name:
        name = name.replace("decoder_pred" , "decoder.decoder_pred" )
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight" , "vit.layernorm.weight" )
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias" , "vit.layernorm.bias" )
    return name
def convert_state_dict( orig_state_dict , config ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split("." )
            layer_num = int(key_split[1] )
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = """decoder.decoder_layers."""
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
            else:
                dim = config.hidden_size
                prefix = """vit.encoder.layer."""
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def convert_vit_mae_checkpoint( checkpoint_url , pytorch_dump_folder_path ):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1_0_2_4
        config.intermediate_size = 4_0_9_6
        config.num_hidden_layers = 2_4
        config.num_attention_heads = 1_6
    elif "huge" in checkpoint_url:
        config.patch_size = 1_4
        config.hidden_size = 1_2_8_0
        config.intermediate_size = 5_1_2_0
        config.num_hidden_layers = 3_2
        config.num_attention_heads = 1_6
    model = ViTMAEForPreTraining(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["""model"""]
    image_processor = ViTMAEImageProcessor(size=config.image_size )
    new_state_dict = convert_state_dict(state_dict , config )
    model.load_state_dict(new_state_dict )
    model.eval()
    url = """https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"""
    image = Image.open(requests.get(url , stream=True ).raw )
    image_processor = ViTMAEImageProcessor(size=config.image_size )
    inputs = image_processor(images=image , return_tensors="pt" )
    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    logits = outputs.logits
    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.73_09, -0.71_28, -1.01_69], [-1.01_61, -0.90_58, -1.18_78], [-1.04_78, -0.94_11, -1.19_11]] )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.15_99, -0.91_99, -1.22_21], [-1.19_52, -0.92_69, -1.23_07], [-1.21_43, -0.93_37, -1.22_62]] )
    else:
        expected_slice = torch.tensor(
            [[-0.91_92, -0.84_81, -1.12_59], [-1.13_49, -1.00_34, -1.25_99], [-1.17_57, -1.04_29, -1.27_26]] )
    # verify logits
    assert torch.allclose(logits[0, :3, :3] , expected_slice , atol=1e-4 )
    print(F'Saving model to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__A =parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
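# --- Example invocation (editorial addition; the script file name is hypothetical,
# --- the URL is this script's default checkpoint):
#   python convert_vit_mae_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#       --pytorch_dump_folder_path ./vit-mae-base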
| 370
|
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config( config_path , display=False ):
    config = OmegaConf.load(config_path )
    if display:
        print(yaml.dump(OmegaConf.to_container(config ) ) )
    return config
def load_vqgan( device , conf_path=None , ckpt_path=None ):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path , display=False )
    model = VQModel(**config.model.params )
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path , map_location=device )
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd , strict=True )
    model.to(device )
    del sd
    return model
def reconstruct_with_vqgan( x , model ):
    z , _ , _ = model.encode(x )
    print(F'VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}' )
    xrec = model.decode(z )
    return xrec
def get_obj_from_str( string , reload=False ):
    module , cls = string.rsplit("." , 1 )
    if reload:
        module_imp = importlib.import_module(module )
        importlib.reload(module_imp )
    return getattr(importlib.import_module(module , package=None ) , cls )
def instantiate_from_config( config ):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate." )
    return get_obj_from_str(config["target"] )(**config.get("params" , {} ) )
def load_model_from_config( config , sd , gpu=True , eval_mode=True ):
    model = instantiate_from_config(config )
    if sd is not None:
        model.load_state_dict(sd )
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model( config , ckpt , gpu , eval_mode ):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt , map_location="cpu" )
        global_step = pl_sd["global_step"]
        print(F'loaded model from global step {global_step}.' )
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model , pl_sd["state_dict"] , gpu=gpu , eval_mode=eval_mode )["model"]
    return model, global_step
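# --- Usage sketch (editorial addition): wires the loaders above together. The
# --- checkpoint/config locations are load_vqgan's defaults and must exist locally.
if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    vqgan = load_vqgan(device)  # reads ./model_checkpoints/vqgan_only.{yaml,pt}
    dummy = torch.randn(1, 3, 256, 256, device=device)  # stand-in for an image batch
    print(reconstruct_with_vqgan(dummy, vqgan).shape)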
| 47
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config( model_name ):
    """simple docstring"""
    backbone_config = SwinConfig(
        embed_dim=1_92 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['stage2', 'stage3', 'stage4'] , )
    config = DetaConfig(
        backbone_config=backbone_config , num_queries=9_00 , encoder_ffn_dim=20_48 , decoder_ffn_dim=20_48 , num_feature_levels=5 , assign_first_stage=True , with_box_refine=True , two_stage=True , )
    # set labels
    repo_id = 'huggingface/label-files'
    if "o365" in model_name:
        num_labels = 3_66
        filename = 'object365-id2label.json'
    else:
        num_labels = 91
        filename = 'coco-detection-id2label.json'
    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='dataset' ) ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def create_rename_keys( config ):
    """simple docstring"""
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def rename_key( dct , src , dest ):
    """simple docstring"""
    val = dct.pop(src )
    dct[dest] = val
def read_in_swin_q_k_v( state_dict , backbone_config ):
    """simple docstring"""
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''' )
            in_proj_bias = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''' )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[: dim]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[
                -dim :, :
            ]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim :]
            # fmt: on
def read_in_decoder_q_k_v( state_dict , config ):
    """simple docstring"""
    hidden_size = config.d_model
    for i in range(config.decoder_layers ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''model.decoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:hidden_size, :]
        state_dict[f'''model.decoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:hidden_size]
        state_dict[f'''model.decoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[
            hidden_size : hidden_size * 2, :
        ]
        state_dict[f'''model.decoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'''model.decoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-hidden_size:, :]
        state_dict[f'''model.decoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-hidden_size:]
def prepare_img():
    """simple docstring"""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_deta_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub ):
    """simple docstring"""
    config = get_deta_config(model_name )
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' )
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' )
    else:
        raise ValueError(f'''Model name {model_name} not supported''' )
    state_dict = torch.load(checkpoint_path , map_location='cpu' )['model']
    # original state dict
    for name, param in state_dict.items():
        print(name , param.shape )
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_swin_q_k_v(state_dict , config.backbone_config )
    read_in_decoder_q_k_v(state_dict , config )
    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key )
            state_dict[key.replace("transformer.decoder" , "model.decoder" )] = val
        if "input_proj" in key:
            val = state_dict.pop(key )
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key )
            state_dict[key.replace("transformer" , "model" )] = val
    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model.to(device )
    # load image processor
    processor = DetaImageProcessor(format='coco_detection' )
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img , return_tensors='pt' )
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values.to(device ) )
    # verify logits
    print('Logits:' , outputs.logits[0, :3, :3] )
    print('Boxes:' , outputs.pred_boxes[0, :3, :3] )
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.63_08, -2.84_85, -5.37_37], [-7.20_37, -4.55_05, -4.80_27], [-7.29_43, -4.26_11, -4.66_17]] )
        expected_boxes = torch.tensor([[0.49_87, 0.49_69, 0.99_99], [0.25_49, 0.54_98, 0.48_05], [0.54_98, 0.27_57, 0.05_69]] )
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.01_22, -3.57_20, -4.97_17], [-8.15_47, -3.68_86, -4.63_89], [-7.66_10, -3.61_94, -5.01_34]] )
        expected_boxes = torch.tensor([[0.25_23, 0.55_49, 0.48_81], [0.77_15, 0.41_49, 0.46_01], [0.55_03, 0.27_53, 0.05_75]] )
    assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(device ) , atol=1e-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(device ) , atol=1e-4 )
    print('Everything ok!' )
    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    # Push to hub
    if push_to_hub:
        print('Pushing model and processor to hub...' )
        model.push_to_hub(f'''jozhang97/{model_name}''' )
        processor.push_to_hub(f'''jozhang97/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
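# --- Example invocation (editorial addition; the script file name is hypothetical):
#   python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large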
| 310
|
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester :
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = """gelu"""
    def __init__( self , parent , batch_size=14 , seq_length=7 , is_training=True , use_input_mask=True , use_labels=True , vocab_size=99 , d_model=32 , num_hidden_layers=2 , num_attention_heads=4 , ffn_dim=37 , activation_function="gelu" , activation_dropout=0.1 , attention_dropout=0.1 , max_position_embeddings=512 , initializer_range=0.0_2 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config( self ):
'''simple docstring'''
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
    def prepare_config_and_inputs( self ):
'''simple docstring'''
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
    def get_config( self ):
        '''simple docstring'''
        return XGLMConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=True , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=True , )
    def prepare_config_and_inputs_for_common( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest (TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = TFXGLMModelTester(self )
        self.config_tester = ConfigTester(self , config_class=XGLMConfig , n_embd=37 )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
    def test_resize_token_embeddings( self ):
        '''simple docstring'''
        super().test_resize_token_embeddings()
@require_tf
class __lowerCamelCase (unittest.TestCase ):
@slow
    def test_lm_generate_xglm( self , verify_outputs=True ):
        '''simple docstring'''
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
        input_ids = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.int32 )  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids , do_sample=False , num_beams=1 )
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist() , expected_output_ids )
@slow
    def test_xglm_sample( self ):
        '''simple docstring'''
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
        tf.random.set_seed(0 )
        tokenized = tokenizer('Today is a nice day and' , return_tensors='tf' )
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(':/CPU:0' ):
            output_ids = model.generate(input_ids , do_sample=True , seed=[7, 0] )
        output_str = tokenizer.decode(output_ids[0] , skip_special_tokens=True )
        EXPECTED_OUTPUT_STR = (
            'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
        )
        self.assertEqual(output_str , EXPECTED_OUTPUT_STR )
@slow
    def test_batch_generation( self ):
        '''simple docstring'''
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
        tokenizer.padding_side = 'left'
        # use different length sentences to test batching
        sentences = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When',
            'Hello, my dog is a little',
        ]
        inputs = tokenizer(sentences , return_tensors='tf' , padding=True )
        input_ids = inputs['input_ids']
        outputs = model.generate(input_ids=input_ids , attention_mask=inputs['attention_mask'] , max_new_tokens=12 )
        inputs_non_padded = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded , max_new_tokens=12 )
        inputs_padded = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
        output_padded = model.generate(input_ids=inputs_padded , max_new_tokens=12 )
        batch_out_sentence = tokenizer.batch_decode(outputs , skip_special_tokens=True )
        non_padded_sentence = tokenizer.decode(output_non_padded[0] , skip_special_tokens=True )
        padded_sentence = tokenizer.decode(output_padded[0] , skip_special_tokens=True )
        expected_output_sentence = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
            'a single',
            'Hello, my dog is a little bit of a shy one, but he is very friendly',
        ]
        self.assertListEqual(expected_output_sentence , batch_out_sentence )
        self.assertListEqual(expected_output_sentence , [non_padded_sentence, padded_sentence] )
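# --- Editorial note (not in the original file): the suites above follow the standard
# --- transformers TF test layout; a typical invocation (hypothetical file path) is:
# ---   RUN_SLOW=1 python -m pytest tests/models/xglm/test_modeling_tf_xglm.py -k "sample or batch"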
| 310
| 1
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys( state_dict , encoder_only=False ):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith('head' ):
            key = 'segformer.encoder.' + key
        if key.startswith('backbone' ):
            key = key.replace('backbone' , 'segformer.encoder' )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find('patch_embed' ) + len('patch_embed' )]
            key = key.replace(f'''patch_embed{idx}''' , f'''patch_embeddings.{int(idx )-1}''' )
        if "norm" in key:
            key = key.replace('norm' , 'layer_norm' )
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find('segformer.encoder.layer_norm' ) + len('segformer.encoder.layer_norm' )]
            key = key.replace(f'''layer_norm{idx}''' , f'''layer_norm.{int(idx )-1}''' )
        if "layer_norm1" in key:
            key = key.replace('layer_norm1' , 'layer_norm_1' )
        if "layer_norm2" in key:
            key = key.replace('layer_norm2' , 'layer_norm_2' )
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find('block' ) + len('block' )]
            key = key.replace(f'''block{idx}''' , f'''block.{int(idx )-1}''' )
        if "attn.q" in key:
            key = key.replace('attn.q' , 'attention.self.query' )
        if "attn.proj" in key:
            key = key.replace('attn.proj' , 'attention.output.dense' )
        if "attn" in key:
            key = key.replace('attn' , 'attention.self' )
        if "fc1" in key:
            key = key.replace('fc1' , 'dense1' )
        if "fc2" in key:
            key = key.replace('fc2' , 'dense2' )
        if "linear_pred" in key:
            key = key.replace('linear_pred' , 'classifier' )
        if "linear_fuse" in key:
            key = key.replace('linear_fuse.conv' , 'linear_fuse' )
            key = key.replace('linear_fuse.bn' , 'batch_norm' )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find('linear_c' ) + len('linear_c' )]
            key = key.replace(f'''linear_c{idx}''' , f'''linear_c.{int(idx )-1}''' )
        if key.startswith('head' ):
            key = key.replace('head' , 'classifier' )
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v( state_dict , config ):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f'''segformer.encoder.block.{i}.{j}.attention.self.kv.weight''' )
            kv_bias = state_dict.pop(f'''segformer.encoder.block.{i}.{j}.attention.self.kv.bias''' )
            # next, add keys and values (in that order) to the state dict
            state_dict[f'''segformer.encoder.block.{i}.{j}.attention.self.key.weight'''] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f'''segformer.encoder.block.{i}.{j}.attention.self.key.bias'''] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f'''segformer.encoder.block.{i}.{j}.attention.self.value.weight'''] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f'''segformer.encoder.block.{i}.{j}.attention.self.value.bias'''] = kv_bias[
                config.hidden_sizes[i] :
            ]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def convert_segformer_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path ):
    config = SegformerConfig()
    encoder_only = False
    # set attributes based on model_name
    repo_id = 'huggingface/label-files'
    if "segformer" in model_name:
        size = model_name[len('segformer.' ) : len('segformer.' ) + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = 'ade20k-id2label.json'
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = 'cityscapes-id2label.json'
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f'''Model {model_name} not supported''' )
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = 'imagenet-1k-id2label.json'
        expected_shape = (1, 1000)
    else:
        raise ValueError(f'''Model {model_name} not supported''' )
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "b0":
        pass
    elif size == "b1":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == "b2":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(f'''Size {size} not supported''' )
    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512) , keep_ratio=False , align=False , do_random_crop=False )
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors='pt' ).pixel_values
    logger.info(f'''Converting model {model_name}...''' )
    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path , map_location=torch.device('cpu' ) )
    else:
        state_dict = torch.load(checkpoint_path , map_location=torch.device('cpu' ) )['state_dict']
    # rename keys
    state_dict = rename_keys(state_dict , encoder_only=encoder_only )
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]
    # key and value matrices need special treatment
    read_in_k_v(state_dict , config )
    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config )
    else:
        model = SegformerForSemanticSegmentation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # forward pass
    outputs = model(pixel_values )
    logits = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
[[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.5_8_2_0, -8.7_2_3_1, -8.3_2_1_5], [-8.0_6_0_0, -1_0.3_5_2_9, -1_0.0_3_0_4], [-7.5_2_0_8, -9.4_1_0_3, -9.6_2_3_9]],
[[-1_2.6_9_1_8, -1_3.8_9_9_4, -1_3.7_1_3_7], [-1_3.3_1_9_6, -1_5.7_5_2_3, -1_5.4_7_8_9], [-1_2.9_3_4_3, -1_4.8_7_5_7, -1_4.9_6_8_9]],
[[-1_1.1_9_1_1, -1_1.9_4_2_1, -1_1.3_2_4_3], [-1_1.3_3_4_2, -1_3.6_8_3_9, -1_3.3_5_8_1], [-1_0.3_9_0_9, -1_2.1_8_3_2, -1_2.4_8_5_8]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-1_1.8_1_7_3, -1_4.3_8_5_0, -1_6.3_1_2_8], [-1_4.5_6_4_8, -1_6.5_8_0_4, -1_8.6_5_6_8], [-1_4.7_2_2_3, -1_5.7_3_8_7, -1_8.4_2_1_8]],
[[-1_5.7_2_9_0, -1_7.9_1_7_1, -1_9.4_4_2_3], [-1_8.3_1_0_5, -1_9.9_4_4_8, -2_1.4_6_6_1], [-1_7.9_2_9_6, -1_8.6_4_9_7, -2_0.7_9_1_0]],
[[-1_5.0_7_8_3, -1_7.0_3_3_6, -1_8.2_7_8_9], [-1_6.8_7_7_1, -1_8.6_8_7_0, -2_0.1_6_1_2], [-1_6.2_4_5_4, -1_7.1_4_2_6, -1_9.5_0_5_5]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.0_8_7_8, -1_0.2_0_8_1, -1_0.1_8_9_1], [-9.3_1_4_4, -1_0.7_9_4_1, -1_0.9_8_4_3], [-9.2_2_9_4, -1_0.3_8_5_5, -1_0.5_7_0_4]],
[[-1_2.2_3_1_6, -1_3.9_0_6_8, -1_3.6_1_0_2], [-1_2.9_1_6_1, -1_4.3_7_0_2, -1_4.3_2_3_5], [-1_2.5_2_3_3, -1_3.7_1_7_4, -1_3.7_9_3_2]],
[[-1_4.6_2_7_5, -1_5.2_4_9_0, -1_4.9_7_2_7], [-1_4.3_4_0_0, -1_5.9_6_8_7, -1_6.2_8_2_7], [-1_4.1_4_8_4, -1_5.4_0_3_3, -1_5.8_9_3_7]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-1_2.3_1_4_4, -1_3.2_4_4_7, -1_4.0_8_0_2], [-1_3.3_6_1_4, -1_4.5_8_1_6, -1_5.6_1_1_7], [-1_3.3_3_4_0, -1_4.4_4_3_3, -1_6.2_2_1_9]],
[[-1_9.2_7_8_1, -2_0.4_1_2_8, -2_0.7_5_0_6], [-2_0.6_1_5_3, -2_1.6_5_6_6, -2_2.0_9_9_8], [-1_9.9_8_0_0, -2_1.0_4_3_0, -2_2.1_4_9_4]],
[[-1_8.8_7_3_9, -1_9.7_8_0_4, -2_1.1_8_3_4], [-2_0.1_2_3_3, -2_1.6_7_6_5, -2_3.2_9_4_4], [-2_0.0_3_1_5, -2_1.2_6_4_1, -2_3.6_9_4_4]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.5_5_2_4, -1_2.0_8_3_5, -1_1.7_3_4_8], [-1_0.5_2_2_9, -1_3.6_4_4_6, -1_4.5_6_6_2], [-9.5_8_4_2, -1_2.8_8_5_1, -1_3.9_4_1_4]],
[[-1_5.3_4_3_2, -1_7.5_3_2_3, -1_7.0_8_1_8], [-1_6.3_3_3_0, -1_8.9_2_5_5, -1_9.2_1_0_1], [-1_5.1_3_4_0, -1_7.7_8_4_8, -1_8.3_9_7_1]],
[[-1_2.6_0_7_2, -1_4.9_4_8_6, -1_4.6_6_3_1], [-1_3.7_6_2_9, -1_7.0_9_0_7, -1_7.7_7_4_5], [-1_2.7_8_9_9, -1_6.1_6_9_5, -1_7.1_6_7_1]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_1.9_2_9_5, -1_3.4_0_5_7, -1_4.8_1_0_6], [-1_3.3_4_3_1, -1_4.8_1_7_9, -1_5.3_7_8_1], [-1_4.2_8_3_6, -1_5.5_9_4_2, -1_6.1_5_8_8]],
[[-1_1.4_9_0_6, -1_2.8_0_6_7, -1_3.6_5_6_4], [-1_3.1_1_8_9, -1_4.0_5_0_0, -1_4.1_5_4_3], [-1_3.8_7_4_8, -1_4.5_1_3_6, -1_4.8_7_8_9]],
[[0.5_3_7_4, 0.1_0_6_7, -0.4_7_4_2], [0.1_1_4_1, -0.2_2_5_5, -0.7_0_9_9], [-0.3_0_0_0, -0.5_9_2_4, -1.3_1_0_5]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.8_2_1_7, -9.8_7_6_7, -1_0.1_7_1_7], [-9.4_4_3_8, -1_0.9_0_5_8, -1_1.4_0_4_7], [-9.7_9_3_9, -1_2.3_4_9_5, -1_2.1_0_7_9]],
[[-7.1_5_1_4, -9.5_3_3_6, -1_0.0_8_6_0], [-9.7_7_7_6, -1_1.6_8_2_2, -1_1.8_4_3_9], [-1_0.1_4_1_1, -1_2.7_6_5_5, -1_2.8_9_7_2]],
[[0.3_0_2_1, 0.0_8_0_5, -0.2_3_1_0], [-0.0_3_2_8, -0.1_6_0_5, -0.2_7_1_4], [-0.1_4_0_8, -0.5_4_7_7, -0.6_9_7_6]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
[
[
[-1.1_372E01, -1.2_787E01, -1.3_477E01],
[-1.2_536E01, -1.4_194E01, -1.4_409E01],
[-1.3_217E01, -1.4_888E01, -1.5_327E01],
],
[
[-1.4_791E01, -1.7_122E01, -1.8_277E01],
[-1.7_163E01, -1.9_192E01, -1.9_533E01],
[-1.7_897E01, -1.9_991E01, -2.0_315E01],
],
[
[7.6_723E-01, 4.1_921E-01, -7.7_878E-02],
[4.7_772E-01, 9.5_557E-03, -2.8_082E-01],
[3.6_032E-01, -2.4_826E-01, -5.1_168E-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.4_9_5_9, -1_1.3_0_8_7, -1_1.7_4_7_9], [-1_1.0_0_2_5, -1_2.6_5_4_0, -1_2.3_3_1_9], [-1_1.4_0_6_4, -1_3.0_4_8_7, -1_2.9_9_0_5]],
[[-9.8_9_0_5, -1_1.3_0_8_4, -1_2.0_8_5_4], [-1_1.1_7_2_6, -1_2.7_6_9_8, -1_2.9_5_8_3], [-1_1.5_9_8_5, -1_3.3_2_7_8, -1_4.1_7_7_4]],
[[0.2_2_1_3, 0.0_1_9_2, -0.2_4_6_6], [-0.1_7_3_1, -0.4_2_1_3, -0.4_8_7_4], [-0.3_1_2_6, -0.6_5_4_1, -1.1_3_8_9]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
[[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_6.0_9_7_6, -1_6.4_8_5_6, -1_7.3_9_6_2], [-1_6.6_2_3_4, -1_9.0_3_4_2, -1_9.7_6_8_5], [-1_6.0_9_0_0, -1_8.0_6_6_1, -1_9.1_1_8_0]],
[[-1_8.4_7_5_0, -1_8.8_4_8_8, -1_9.5_0_7_4], [-1_9.4_0_3_0, -2_2.1_5_7_0, -2_2.5_9_7_7], [-1_9.1_1_9_1, -2_0.8_4_8_6, -2_2.3_7_8_3]],
[[-4.5_1_7_8, -5.5_0_3_7, -6.5_1_0_9], [-5.0_8_8_4, -7.2_1_7_4, -8.0_3_3_4], [-4.4_1_5_6, -5.8_1_1_7, -7.2_9_7_0]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_4.2_0_8_1, -1_4.4_7_3_2, -1_4.1_9_7_7], [-1_4.5_8_6_7, -1_6.4_4_2_3, -1_6.6_3_5_6], [-1_3.4_4_4_1, -1_4.9_6_8_5, -1_6.8_6_9_6]],
[[-1_4.4_5_7_6, -1_4.7_0_7_3, -1_5.0_4_5_1], [-1_5.0_8_1_6, -1_7.6_2_3_7, -1_7.9_8_7_3], [-1_4.4_2_1_3, -1_6.0_1_9_9, -1_8.5_9_9_2]],
[[-4.7_3_4_9, -4.9_5_8_8, -5.0_9_6_6], [-4.3_2_1_0, -6.9_3_2_5, -7.2_5_9_1], [-3.4_3_1_2, -4.7_4_8_4, -7.1_9_1_7]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_1.7_7_3_7, -1_1.9_5_2_6, -1_1.3_2_7_3], [-1_3.6_6_9_2, -1_4.4_5_7_4, -1_3.8_8_7_8], [-1_3.8_9_3_7, -1_4.6_9_2_4, -1_5.9_3_4_5]],
[[-1_4.6_7_0_6, -1_4.5_3_3_0, -1_4.1_3_0_6], [-1_6.1_5_0_2, -1_6.8_1_8_0, -1_6.4_2_6_9], [-1_6.8_3_3_8, -1_7.8_9_3_9, -2_0.1_7_4_6]],
[[1.0_4_9_1, 0.8_2_8_9, 1.0_3_1_0], [1.1_0_4_4, 0.5_2_1_9, 0.8_0_5_5], [1.0_8_9_9, 0.6_9_2_6, 0.5_5_9_0]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_2.5_6_4_1, -1_3.4_7_7_7, -1_3.0_6_8_4], [-1_3.9_5_8_7, -1_5.8_9_8_3, -1_6.6_5_5_7], [-1_3.3_1_0_9, -1_5.7_3_5_0, -1_6.3_1_4_1]],
[[-1_4.7_0_7_4, -1_5.4_3_5_2, -1_4.5_9_4_4], [-1_6.6_3_5_3, -1_8.1_6_6_3, -1_8.6_1_2_0], [-1_5.1_7_0_2, -1_8.0_3_2_9, -1_8.1_5_4_7]],
[[-1.7_9_9_0, -2.0_9_5_1, -1.7_7_8_4], [-2.6_3_9_7, -3.8_2_4_5, -3.9_6_8_6], [-1.5_2_6_4, -2.8_1_2_6, -2.9_3_1_6]],
] )
    else:
        predicted_class_idx = logits.argmax(-1 ).item()
        print('Predicted class:' , model.config.id2label[predicted_class_idx] )
    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1E-2 )
    # finally, save model and image processor
    logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""segformer.b0.512x512.ade.160k""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
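# --- Example invocation (editorial addition; file names/paths are hypothetical):
#   python convert_segformer_original_to_pytorch.py --model_name segformer.b0.512x512.ade.160k \
#       --checkpoint_path ./segformer_b0.pth --pytorch_dump_folder_path ./segformer-b0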
| 12
|
"""simple docstring"""
import re
def indian_phone_validator( phone ):
    pat = re.compile(r'^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$' )
    if match := re.search(pat , phone ):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator("""+918827897895"""))
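    # --- Extra examples (editorial addition): accepted prefixes are +91 / 91 / 0,
    # --- followed by a 10-digit number starting with 7, 8 or 9.
    print(indian_phone_validator("9876543210"))      # True - bare 10-digit number
    print(indian_phone_validator("+91-8827897895"))  # True - hyphen after +91 also matches
    print(indian_phone_validator("12345"))           # False - too short, wrong leading digit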
| 12
| 1
|
"""simple docstring"""
def harmonic_series( n_term : str ) -> list:
    if n_term == "":
        return []
    series = []
    for temp in range(int(n_term ) ):
        series.append(f'''1/{temp + 1}''' if series else '''1''' )
    return series
if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
print(harmonic_series(nth_term))
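    # --- Worked example (editorial addition): harmonic_series("4") -> ['1', '1/2', '1/3', '1/4'];
    # --- the first element has no "1/" prefix by design.
    print(harmonic_series("4"))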
| 45
|
from __future__ import annotations
def __snake_case ( matrix : list[list[int]] ):
    """simple docstring"""
    # preprocessing the first row
    for i in range(1 , len(matrix[0] ) ):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1 , len(matrix ) ):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1 , len(matrix ) ):
        for j in range(1 , len(matrix[0] ) ):
            matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
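    # --- Worked example (editorial addition): for the grid below the cheapest
    # --- right/down path is 1 -> 3 -> 1 -> 1 -> 1, with total cost 7.
    print(__snake_case([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))  # 7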
| 312
| 0
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        """simple docstring"""
        token = """<pad>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<s>""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(vocab_keys[-1] , """<mask>""" )
        self.assertEqual(len(vocab_keys ) , 1002 )
    def test_vocab_size( self ):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size , 1002 )
    def test_full_tokenizer( self ):
        """simple docstring"""
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_save_pretrained(self ):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2 )

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=True )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )

                shutil.rmtree(tmpdirname2 )

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=False )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )

                shutil.rmtree(tmpdirname2 )
    @cached_property
    def big_tokenizer(self ):
        return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base' )
    def test_picklable_without_disk(self ):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB , f.name )
            tokenizer = XLMRobertaTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
        pickle.loads(pickled_tokenizer )
    def test_rust_and_python_full_tokenizers(self ):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = 'I was born in 92000, and this is falsé.'

        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )

        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    @slow
    def test_tokenization_base_easy_symbols(self ):
        symbols = 'Hello World!'
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
    @slow
    def test_tokenization_base_hard_symbols(self ):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
        )
        original_tokenizer_encodings = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
179459,
124850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
10114,
711,
152,
20,
6,
5,
22376,
642,
1221,
15190,
34153,
450,
5608,
959,
1119,
57702,
136,
186,
47,
1098,
29367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
50901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
def lowerCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
__lowercase : int = {"""input_ids""": [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
| 306
|
def abbr ( a : str , b : str ) -> bool:
    """
    Return True if string ``a`` can be turned into ``b`` by capitalizing some of
    its lowercase letters and deleting all the remaining lowercase letters.
    """
    n = len(a )
    m = len(b )
    dp = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
    dp[0][0] = True
    for i in range(n ):
        for j in range(m + 1 ):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
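    # Illustrative example (an addition): "daBcd" becomes "ABC" by capitalizing
    # 'a' and 'c' and deleting the two remaining lowercase 'd's.
    assert abbr("daBcd", "ABC")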
| 306
| 1
|
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase ):
    def __init__(
        self ,
        parent ,
        batch_size=7 ,
        num_channels=3 ,
        image_size=18 ,
        min_resolution=30 ,
        max_resolution=400 ,
        do_resize=True ,
        size=None ,
        do_normalize=True ,
        image_mean=[0.5, 0.5, 0.5] ,
        image_std=[0.5, 0.5, 0.5] ,
    ):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self ):
        return {
            'image_mean': self.image_mean,
            'image_std': self.image_std,
            'do_normalize': self.do_normalize,
            'do_resize': self.do_resize,
            'size': self.size,
        }


@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self ):
        self.image_processor_tester = DPTImageProcessingTester(self )

    @property
    def image_processor_dict(self ):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'image_mean' ) )
        self.assertTrue(hasattr(image_processing , 'image_std' ) )
        self.assertTrue(hasattr(image_processing , 'do_normalize' ) )
        self.assertTrue(hasattr(image_processing , 'do_resize' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )

    def test_image_processor_from_dict_with_kwargs(self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )

    def test_call_pil(self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )

    def test_call_numpy(self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )

    def test_call_pytorch(self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
| 24
|
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin ):
    """Wraps an M-CTC-T feature extractor and a tokenizer into a single processor."""

    feature_extractor_class = 'MCTCTFeatureExtractor'
    tokenizer_class = 'AutoTokenizer'

    def __init__(self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )

        if 'raw_speech' in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
            audio = kwargs.pop('raw_speech' )
        else:
            audio = kwargs.pop('audio' , None )
        sampling_rate = kwargs.pop('sampling_rate' , None )
        text = kwargs.pop('text' , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.' )

        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs

    def batch_decode(self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def pad(self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args , **kwargs )

        input_features = kwargs.pop('input_features' , None )
        labels = kwargs.pop('labels' , None )
        if len(args ) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features , *args , **kwargs )
        if labels is not None:
            labels = self.tokenizer.pad(labels , **kwargs )

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features['labels'] = labels['input_ids']
            return input_features

    def decode(self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    @contextmanager
    def as_target_processor(self ):
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
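# Usage sketch (an addition; `feature_extractor`, `tokenizer`, `raw_audio` and
# `transcript` below are hypothetical placeholders, not objects defined here):
#
#   processor = MCTCTProcessor(feature_extractor, tokenizer)
#   batch = processor(audio=raw_audio, sampling_rate=16_000, text=transcript)
#   # `batch` holds the extractor's input features plus batch["labels"]
#   # taken from the tokenizer's input_ids, as implemented in __call__ above.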
| 23
| 0
|
'''simple docstring'''
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'


def main ():
    message = input('Enter message: ' )
    key = input('Enter key [alphanumeric]: ' )
    mode = input('Encrypt/Decrypt [e/d]: ' )

    if mode.lower().startswith('e' ):
        mode = 'encrypt'
        translated = encrypt_message(key , message )
    elif mode.lower().startswith('d' ):
        mode = 'decrypt'
        translated = decrypt_message(key , message )

    print(F"\n{mode.title()}ed message:" )
    print(translated )


def encrypt_message ( key , message ):
    return translate_message(key , message , 'encrypt' )


def decrypt_message ( key , message ):
    return translate_message(key , message , 'decrypt' )


def translate_message ( key , message , mode ):
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper() )
        if num != -1:
            if mode == 'encrypt':
                num += LETTERS.find(key[key_index] )
            elif mode == 'decrypt':
                num -= LETTERS.find(key[key_index] )

            num %= len(LETTERS )

            if symbol.isupper():
                translated.append(LETTERS[num] )
            elif symbol.islower():
                translated.append(LETTERS[num].lower() )

            key_index += 1
            if key_index == len(key ):
                key_index = 0
        else:
            translated.append(symbol )

    return ''.join(translated )
if __name__ == "__main__":
main()
| 52
|
'''simple docstring'''
def solution ( n : int = 1000 ) -> int:
    """Return the index of the first term in the Fibonacci sequence to contain
    ``n`` digits."""
    fa , fb = 1, 1
    index = 2
    while True:
        i = 0
        f = fa + fb
        fa , fb = fb, f
        index += 1
        for _ in str(f ):
            i += 1
        if i == n:
            break
    return index
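
# Sanity check (an addition): 144 is the 12th Fibonacci term and the first
# with three digits, so solution(3) should return 12.
assert solution(3) == 12
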
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 52
| 1
|
'''simple docstring'''
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def __UpperCAmelCase ( self ):
'''simple docstring'''
with self.assertRaises(__a ):
__a : Any = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def __UpperCAmelCase ( self ):
'''simple docstring'''
with self.assertRaises(__a ):
__a : Optional[Any] = pa.array(TypedSequence([1, 2, 3] , try_type=Value('bool' ) , type=Value('int64' ) ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = pa.array(TypedSequence([1, 2, 3] , type=Value('int32' ) ) )
self.assertEqual(arr.type , pa.intaa() )
def __UpperCAmelCase ( self ):
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
__a : Union[str, Any] = pa.array(TypedSequence(['foo', 'bar'] , type=Value('int64' ) ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = pa.array(TypedSequence([1, 2, 3] , try_type=Value('int32' ) ) )
self.assertEqual(arr.type , pa.intaa() )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = pa.array(TypedSequence(['foo', 'bar'] , try_type=Value('int64' ) ) )
self.assertEqual(arr.type , pa.string() )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , 'int64' ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , 'int64' ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
__a : Any = pa.array(TypedSequence(['foo', 'bar'] , type=ArrayaD((1, 3) , 'int64' ) ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , 'int64' ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , 'int64' ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = pa.array(TypedSequence(['foo', 'bar'] , try_type=ArrayaD((1, 3) , 'int64' ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def __UpperCAmelCase ( self ):
'''simple docstring'''
import PIL.Image
__a : Union[str, Any] = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
'datasets.arrow_writer.cast_to_python_objects' , side_effect=__a ) as mock_cast_to_python_objects:
__a : Union[str, Any] = pa.array(TypedSequence([{'path': None, 'bytes': b'image_bytes'}, pil_image] , type=Image() ) )
__a , __a : Optional[Any] = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn('optimize_list_casting' , __a )
self.assertFalse(kwargs['optimize_list_casting'] )
def _check_output (output , expected_num_chunks : int ):
    stream = pa.BufferReader(output ) if isinstance(output , pa.Buffer ) else pa.memory_map(output )
    f = pa.ipc.open_stream(stream )
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches() ) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 10] )
@pytest.mark.parametrize(
'fields' , [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[Any] ):
__a : str = pa.BufferOutputStream()
__a : Any = pa.schema(_SCREAMING_SNAKE_CASE ) if fields else None
with ArrowWriter(stream=_SCREAMING_SNAKE_CASE , schema=_SCREAMING_SNAKE_CASE , writer_batch_size=_SCREAMING_SNAKE_CASE ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
__a , __a : Dict = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
__a : List[Any] = {'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(_SCREAMING_SNAKE_CASE , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def lowerCamelCase ():
__a : int = pa.BufferOutputStream()
__a : Optional[int] = Features({'labels': ClassLabel(names=['neg', 'pos'] )} )
with ArrowWriter(stream=_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE ) as writer:
writer.write({'labels': 0} )
writer.write({'labels': 1} )
__a , __a : List[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
__a : int = pa.BufferReader(output.getvalue() )
__a : Tuple = pa.ipc.open_stream(_SCREAMING_SNAKE_CASE )
__a : pa.Table = f.read_all()
__a : Optional[int] = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(_SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 10] )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any ):
__a : int = pa.BufferOutputStream()
with ArrowWriter(
stream=_SCREAMING_SNAKE_CASE , writer_batch_size=_SCREAMING_SNAKE_CASE , hash_salt='split_name' , check_duplicates=_SCREAMING_SNAKE_CASE , ) as writer:
with pytest.raises(_SCREAMING_SNAKE_CASE ):
writer.write({'col_1': 'foo', 'col_2': 1} , key=[1, 2] )
__a , __a : Optional[Any] = writer.finalize()
@pytest.mark.parametrize('writer_batch_size' , [None, 2, 10] )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ):
__a : Tuple = pa.BufferOutputStream()
with ArrowWriter(
stream=_SCREAMING_SNAKE_CASE , writer_batch_size=_SCREAMING_SNAKE_CASE , hash_salt='split_name' , check_duplicates=_SCREAMING_SNAKE_CASE , ) as writer:
with pytest.raises(_SCREAMING_SNAKE_CASE ):
writer.write({'col_1': 'foo', 'col_2': 1} , key=10 )
writer.write({'col_1': 'bar', 'col_2': 2} , key=10 )
__a , __a : Optional[Any] = writer.finalize()
@pytest.mark.parametrize('writer_batch_size' , [None, 2, 10] )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] ):
__a : List[str] = pa.BufferOutputStream()
with ArrowWriter(
stream=_SCREAMING_SNAKE_CASE , writer_batch_size=_SCREAMING_SNAKE_CASE , hash_salt='split_name' , check_duplicates=_SCREAMING_SNAKE_CASE , ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1} , key=1 )
writer.write({'col_1': 'bar', 'col_2': 2} , key=2 )
__a , __a : Optional[int] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 10] )
@pytest.mark.parametrize(
'fields' , [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Dict ):
__a : Optional[int] = pa.BufferOutputStream()
__a : int = pa.schema(_SCREAMING_SNAKE_CASE ) if fields else None
with ArrowWriter(stream=_SCREAMING_SNAKE_CASE , schema=_SCREAMING_SNAKE_CASE , writer_batch_size=_SCREAMING_SNAKE_CASE ) as writer:
writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} )
writer.write_batch({'col_1': [], 'col_2': []} )
__a , __a : Dict = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
__a : str = {'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(_SCREAMING_SNAKE_CASE , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 10] )
@pytest.mark.parametrize(
'fields' , [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[Any] ):
__a : Tuple = pa.BufferOutputStream()
__a : str = pa.schema(_SCREAMING_SNAKE_CASE ) if fields else None
with ArrowWriter(stream=_SCREAMING_SNAKE_CASE , schema=_SCREAMING_SNAKE_CASE , writer_batch_size=_SCREAMING_SNAKE_CASE ) as writer:
writer.write_table(pa.Table.from_pydict({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} ) )
__a , __a : str = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
__a : List[str] = {'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(_SCREAMING_SNAKE_CASE , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 10] )
@pytest.mark.parametrize(
'fields' , [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str] ):
__a : List[Any] = pa.BufferOutputStream()
__a : List[Any] = pa.schema(_SCREAMING_SNAKE_CASE ) if fields else None
with ArrowWriter(stream=_SCREAMING_SNAKE_CASE , schema=_SCREAMING_SNAKE_CASE , writer_batch_size=_SCREAMING_SNAKE_CASE ) as writer:
writer.write_row(pa.Table.from_pydict({'col_1': ['foo'], 'col_2': [1]} ) )
writer.write_row(pa.Table.from_pydict({'col_1': ['bar'], 'col_2': [2]} ) )
__a , __a : str = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
__a : List[Any] = {'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(_SCREAMING_SNAKE_CASE , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def lowerCamelCase ():
with tempfile.TemporaryDirectory() as tmp_dir:
__a : List[Any] = {'col_1': pa.string(), 'col_2': pa.intaa()}
__a : Optional[int] = os.path.join(_SCREAMING_SNAKE_CASE , 'test.arrow' )
with ArrowWriter(path=_SCREAMING_SNAKE_CASE , schema=pa.schema(_SCREAMING_SNAKE_CASE ) ) as writer:
writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]} )
__a , __a : List[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(_SCREAMING_SNAKE_CASE , metadata=writer._schema.metadata )
_check_output(_SCREAMING_SNAKE_CASE , 1 )
def get_base_dtype (arr_type ):
    if pa.types.is_list(arr_type ):
        return get_base_dtype(arr_type.value_type )
    else:
        return arr_type
def change_first_primitive_element_in_list (lst , value ):
    if isinstance(lst[0] , list ):
        change_first_primitive_element_in_list(lst[0] , value )
    else:
        lst[0] = value
@pytest.mark.parametrize('optimized_int_type, expected_dtype' , [(None, pa.intaa()), (Value('int32' ), pa.intaa())] )
@pytest.mark.parametrize('sequence' , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
__a : Optional[Any] = pa.array(TypedSequence(_SCREAMING_SNAKE_CASE , optimized_int_type=_SCREAMING_SNAKE_CASE ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
'col, expected_dtype' , [
('attention_mask', pa.inta()),
('special_tokens_mask', pa.inta()),
('token_type_ids', pa.inta()),
('input_ids', pa.intaa()),
('other', pa.intaa()),
] , )
@pytest.mark.parametrize('sequence' , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ):
# in range
__a : Any = pa.array(OptimizedTypedSequence(_SCREAMING_SNAKE_CASE , col=_SCREAMING_SNAKE_CASE ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
__a : Optional[Any] = copy.deepcopy(_SCREAMING_SNAKE_CASE )
__a : List[str] = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a : Dict = pa.array(OptimizedTypedSequence(_SCREAMING_SNAKE_CASE , col=_SCREAMING_SNAKE_CASE ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize('raise_exception' , [False, True] )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
__a : Union[str, Any] = str(tmp_path / 'dataset-train.arrow' )
try:
with ArrowWriter(path=_SCREAMING_SNAKE_CASE ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ):
__a : Optional[Any] = 'mock://dataset-train.arrow'
with ArrowWriter(path=_SCREAMING_SNAKE_CASE , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(_SCREAMING_SNAKE_CASE ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
__a , __a : Dict = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(_SCREAMING_SNAKE_CASE )
def lowerCamelCase ():
__a : List[Any] = pa.BufferOutputStream()
with ParquetWriter(stream=_SCREAMING_SNAKE_CASE ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
__a , __a : int = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
__a : Optional[Any] = pa.BufferReader(output.getvalue() )
__a : pa.Table = pq.read_table(_SCREAMING_SNAKE_CASE )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize('embed_local_files' , [False, True] )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ):
import PIL.Image
__a : List[Any] = str(tmp_path / 'test_image_rgb.jpg' )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(_SCREAMING_SNAKE_CASE , format='png' )
__a : Union[str, Any] = pa.BufferOutputStream()
with ParquetWriter(
stream=_SCREAMING_SNAKE_CASE , features=Features({'image': Image()} ) , embed_local_files=_SCREAMING_SNAKE_CASE ) as writer:
writer.write({'image': image_path} )
writer.finalize()
__a : Any = pa.BufferReader(output.getvalue() )
__a : pa.Table = pq.read_table(_SCREAMING_SNAKE_CASE )
__a : Union[str, Any] = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out['image'][0]['path'] , _SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE , 'rb' ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def lowerCamelCase ():
__a : Tuple = pa.schema([pa.field('col_1' , pa.string() , nullable=_SCREAMING_SNAKE_CASE )] )
__a : str = pa.BufferOutputStream()
with ArrowWriter(stream=_SCREAMING_SNAKE_CASE ) as writer:
writer._build_writer(inferred_schema=_SCREAMING_SNAKE_CASE )
assert writer._schema == pa.schema([pa.field('col_1' , pa.string() )] )
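
# Minimal standalone sketch (an addition) of the round-trip pattern the tests
# above exercise: write two examples to an in-memory Arrow stream, then read
# the table back.
def _example_round_trip():
    stream = pa.BufferOutputStream()
    with ArrowWriter(stream=stream) as writer:
        writer.write({'col_1': 'foo', 'col_2': 1})
        writer.write({'col_1': 'bar', 'col_2': 2})
        num_examples, num_bytes = writer.finalize()
    pa_table = pa.ipc.open_stream(pa.BufferReader(stream.getvalue())).read_all()
    return num_examples, num_bytes, pa_table.to_pydict()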
| 27
|
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
| 342
| 0
|
"""simple docstring"""
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
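# A minimal sketch (an addition) of the expansion described above, using a
# plain cartesian product:
#
#   >>> import itertools
#   >>> dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
#   >>> [" ".join(v).strip() for v in itertools.product(*dims)]
#   ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#    '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']
#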
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float('nan' )


class Tee:
    """A helper class to tee print's output into a file.

    Usage: sys.stdout = Tee(filename)
    """

    def __init__( self , filename ):
        self.stdout = sys.stdout
        self.file = open(filename , 'a' )

    def __getattr__( self , attr ):
        return getattr(self.stdout , attr )

    def write( self , msg ):
        self.stdout.write(msg )
        # strip tqdm codes
        self.file.write(re.sub(r'^.*\r' , '' , msg , 0 , re.M ) )
def get_original_command ( max_width=80 , full_python_path=False ):
    cmd = []
    # deal with critical env vars
    env_keys = ['CUDA_VISIBLE_DEVICES']
    for key in env_keys:
        val = os.environ.get(key , None )
        if val is not None:
            cmd.append(f'{key}={val}' )
    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split('/' )[-1]
    cmd.append(python )
    # now the normal args
    cmd += list(map(shlex.quote , sys.argv ) )
    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ''
    while len(cmd ) > 0:
        current_line += f'{cmd.pop(0 )} '
        if len(cmd ) == 0 or len(current_line ) + len(cmd[0] ) + 1 > max_width - 1:
            lines.append(current_line )
            current_line = ''
    return "\\\n".join(lines )
def get_base_command ( args , output_dir ):
    # unwrap multi-line input
    args.base_cmd = re.sub(r'[\\\n]+' , ' ' , args.base_cmd )
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r'--output_dir\s+[^\s]+' , '' , args.base_cmd )
    args.base_cmd += f' --output_dir {output_dir}'
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r'--overwrite_output_dir\s+' , '' , args.base_cmd )
    args.base_cmd += " --overwrite_output_dir"
    return [sys.executable] + shlex.split(args.base_cmd )
def process_run_single ( id , cmd , variation , output_dir , target_metric_key , metric_keys , verbose ):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0 )
        return dict(
            {k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222] )} , )

    result = subprocess.run(cmd , capture_output=True , text=True )

    if verbose:
        print('STDOUT' , result.stdout )
        print('STDERR' , result.stderr )

    # save the streams
    prefix = variation.replace(' ' , '-' )
    with open(Path(output_dir ) / f'log.{prefix}.stdout.txt' , 'w' ) as f:
        f.write(result.stdout )
    with open(Path(output_dir ) / f'log.{prefix}.stderr.txt' , 'w' ) as f:
        f.write(result.stderr )

    if result.returncode != 0:
        if verbose:
            print('failed' )
        return {target_metric_key: nan}

    with io.open(f'{output_dir}/all_results.json' , 'r' , encoding='utf-8' ) as f:
        metrics = json.load(f )

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run ( id , cmd , variation_key , variation , longest_variation_len , target_metric_key , report_metric_keys , repeat_times , output_dir , verbose , ):
    results = []
    metrics = []
    preamble = f'{id}: {variation:<{longest_variation_len}}'
    outcome = f'{preamble}: '
    metric_keys = set(report_metric_keys + [target_metric_key] )
    for i in tqdm(range(repeat_times ) , desc=preamble , leave=False ):
        single_run_metrics = process_run_single(
            id , cmd , variation , output_dir , target_metric_key , metric_keys , verbose )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result ):
            metrics.append(single_run_metrics )
            results.append(result )
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f'\33[2K\r{outcome}'
    if len(metrics ) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key] , 2 )
        results_str = f'{outcome} {mean_target}'
        if len(results ) > 1:
            results_str += f' {tuple(round(x , 2 ) for x in results )}'
        print(results_str )
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome )
        return {variation_key: variation, target_metric_key: nan}
def get_versions ():
    properties = torch.cuda.get_device_properties(torch.device('cuda' ) )
    return f'''
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
'''
def process_results ( results , target_metric_key , report_metric_keys , base_variation , output_dir ):
    df = pd.DataFrame(results )
    variation_key = 'variation'
    diff_key = 'diff_%'

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation] ):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value ):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value ):
        df[diff_key] = df.apply(
            lambda r : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value )
            if not math.isnan(r[target_metric_key] )
            else 0 , axis='columns' , )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols , axis='columns' )  # reorder cols

    # capitalize
    df = df.rename(str.capitalize , axis='columns' )

    # make the cols as narrow as possible
    df_github = df.rename(lambda c : c.replace('_' , '<br>' ) , axis='columns' )
    df_console = df.rename(lambda c : c.replace('_' , '\n' ) , axis='columns' )

    report = ['', 'Copy between the cut-here-lines and paste as is to github or a forum']
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False , floatfmt='.2f' )]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False , floatfmt='.2f' )]

    print('\n\n'.join(report ) )
def main ():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--base-cmd' , default=None , type=str , required=True , help='Base cmd' , )
    parser.add_argument(
        '--variations' , default=None , type=str , nargs='+' , required=True , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , )
    parser.add_argument(
        '--base-variation' , default=None , type=str , help='Baseline variation to compare to. if None the minimal target value will be used to compare against' , )
    parser.add_argument(
        '--target-metric-key' , default=None , type=str , required=True , help='Target metric key in output_dir/all_results.json, e.g., train_samples_per_second' , )
    parser.add_argument(
        '--report-metric-keys' , default='' , type=str , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'" , )
    parser.add_argument(
        '--repeat-times' , default=1 , type=int , help='How many times to re-run each variation - an average will be reported' , )
    parser.add_argument(
        '--output_dir' , default='output_benchmark' , type=str , help='The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked' , )
    parser.add_argument(
        '--verbose' , default=False , action='store_true' , help='Whether to show the outputs of each run or just the benchmark progress' , )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir ).mkdir(exist_ok=True )
    base_cmd = get_base_command(args , output_dir )

    # split each dimension into its --foo variations
    dims = [list(map(str.strip , re.split(r'\|' , x ) ) ) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip , map(' '.join , itertools.product(*dims ) ) ) )
    longest_variation_len = max(len(x ) for x in variations )

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f'benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt'
    print(f'\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt' )
    print(f'and this script\'s output is also piped into {report_fn}' )

    sys.stdout = Tee(report_fn )

    print(f'\n*** Running {len(variations )} benchmarks:' )
    print(f'Base command: {" ".join(base_cmd )}' )

    variation_key = 'variation'
    results = []
    for id, variation in enumerate(tqdm(variations , desc='Total completion: ' , leave=False ) ):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1 , cmd , variation_key , variation , longest_variation_len , args.target_metric_key , report_metric_keys , args.repeat_times , output_dir , args.verbose , ) )

    process_results(results , args.target_metric_key , report_metric_keys , args.base_variation , output_dir )
if __name__ == "__main__":
main()
| 314
|
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char (cp : int ):
    """Check whether ``cp`` is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True

    return False


def is_chinese (word : str ):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char )
        if not _is_chinese_char(cp ):
            return 0
    return 1


def get_chinese_word (tokens : List[str] ):
    word_set = set()

    for token in tokens:
        chinese_word = len(token ) > 1 and is_chinese(token )
        if chinese_word:
            word_set.add(token )
    word_list = list(word_set )
    return word_list


def add_sub_symbol (bert_tokens : List[str] , chinese_word_set : set ):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w ) for w in chinese_word_set] )

    bert_word = bert_tokens
    start , end = 0, len(bert_word )
    while start < end:
        single_word = True
        if is_chinese(bert_word[start] ):
            l = min(end - start , max_word_len )
            for i in range(l , 1 , -1 ):
                whole_word = ''.join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 , start + i ):
                        bert_word[j] = '##' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref (lines : List[str] , ltp_tokenizer : LTP , bert_tokenizer : BertTokenizer ):
    ltp_res = []

    for i in range(0 , len(lines ) , 100 ):
        res = ltp_tokenizer.seg(lines[i : i + 100] )[0]
        res = [get_chinese_word(r ) for r in res]
        ltp_res.extend(res )
    assert len(ltp_res ) == len(lines )

    bert_res = []
    for i in range(0 , len(lines ) , 100 ):
        res = bert_tokenizer(lines[i : i + 100] , add_special_tokens=True , truncation=True , max_length=512 )
        bert_res.extend(res['input_ids'] )
    assert len(bert_res ) == len(lines )

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res , ltp_res ):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id )
            input_tokens.append(token )
        input_tokens = add_sub_symbol(input_tokens , chinese_word )
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens ):
            if token[:2] == '##':
                token = token[2:]
                # save chinese tokens' pos
                if len(token ) == 1 and _is_chinese_char(ord(token ) ):
                    ref_id.append(i )
        ref_ids.append(ref_id )

    assert len(ref_ids ) == len(bert_res )
    return ref_ids


def main ( args ):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name , 'r' , encoding='utf-8' ) as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line ) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp )  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert )

    ref_ids = prepare_ref(data , ltp_tokenizer , bert_tokenizer )

    with open(args.save_path , 'w' , encoding='utf-8' ) as f:
        data = [json.dumps(ref ) + '\n' for ref in ref_ids]
        f.writelines(data )
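
# Quick illustration (an addition): if LTP segments "身高" as one word while
# BERT splits it into two pieces, add_sub_symbol marks the continuation piece
# so whole-word masking can treat both pieces as a single unit.
assert add_sub_symbol(['身', '高'], {'身高'}) == ['身', '##高']
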
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
    args = parser.parse_args()
main(args)
| 314
| 1
|
'''simple docstring'''
def hubble_parameter (
    hubble_constant : float ,
    radiation_density : float ,
    matter_density : float ,
    dark_energy : float ,
    redshift : float ,
):
    """Return the Hubble parameter H(z) for the given cosmological densities."""
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters ):
        raise ValueError('All input parameters must be positive' )

    if any(p > 1 for p in parameters[1:4] ):
        raise ValueError('Relative densities cannot be greater than one' )
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        # E(z)^2, the dimensionless density function
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 34
|
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
A =logging.get_logger(__name__)
A =TypeVar('DatasetType', Dataset, IterableDataset)
def snake_case_ (_a : List[DatasetType] , _a : Optional[List[float]] = None , _a : Optional[int] = None , _a : Optional[DatasetInfo] = None , _a : Optional[NamedSplit] = None , _a : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ):
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('''Unable to interleave an empty list of datasets.''' )
for i, dataset in enumerate(_a ):
if not isinstance(_a , (Dataset, IterableDataset) ):
if isinstance(_a , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
'''is an empty dataset dictionary.''' )
raise ValueError(
F"Dataset at position {i} has at least one split: {list(_a )}\n"
F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_a ) )}']" )
raise ValueError(
F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_a ).__name__}." )
if i == 0:
UpperCAmelCase , UpperCAmelCase = (
(Dataset, IterableDataset) if isinstance(_a , _a ) else (IterableDataset, Dataset)
)
elif not isinstance(_a , _a ):
raise ValueError(
F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
_a , _a , _a , info=_a , split=_a , stopping_strategy=_a )
else:
return _interleave_iterable_datasets(
_a , _a , _a , info=_a , split=_a , stopping_strategy=_a )
def snake_case_ (_a : List[DatasetType] , _a : Optional[DatasetInfo] = None , _a : Optional[NamedSplit] = None , _a : int = 0 , ):
if not dsets:
raise ValueError('''Unable to concatenate an empty list of datasets.''' )
for i, dataset in enumerate(_a ):
if not isinstance(_a , (Dataset, IterableDataset) ):
if isinstance(_a , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
'''is an empty dataset dictionary.''' )
raise ValueError(
F"Dataset at position {i} has at least one split: {list(_a )}\n"
F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_a ) )}']" )
raise ValueError(
F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_a ).__name__}." )
if i == 0:
UpperCAmelCase , UpperCAmelCase = (
(Dataset, IterableDataset) if isinstance(_a , _a ) else (IterableDataset, Dataset)
)
elif not isinstance(_a , _a ):
raise ValueError(
F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(_a , info=_a , split=_a , axis=_a )
else:
return _concatenate_iterable_datasets(_a , info=_a , split=_a , axis=_a )
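# Hedged usage sketch via the public `datasets` API that these helpers back:
# interleave two small in-memory datasets with equal sampling probabilities,
# stopping when the first one is exhausted (the default strategy above).
from datasets import Dataset, interleave_datasets

ds_a = Dataset.from_dict({"a": [0, 1, 2]})
ds_b = Dataset.from_dict({"a": [10, 11, 12]})
mixed = interleave_datasets([ds_a, ds_b], probabilities=[0.5, 0.5], seed=42)
print(mixed["a"])  # rows sampled from ds_a and ds_b until one is exhausted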
| 34
| 1
|
from __future__ import annotations
class lowerCamelCase :
"""simple docstring"""
def __init__( self : Tuple, _UpperCAmelCase : int = 0 ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = key
def A_ ( self : List[Any], _UpperCAmelCase : str, _UpperCAmelCase : int ) -> list[str]:
"""simple docstring"""
assert isinstance(_UpperCAmelCase, _UpperCAmelCase ) and isinstance(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : int = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_5_5
return [chr(ord(_UpperCAmelCase ) ^ key ) for ch in content]
def A_ ( self : Optional[Any], _UpperCAmelCase : str, _UpperCAmelCase : int ) -> list[str]:
"""simple docstring"""
assert isinstance(_UpperCAmelCase, _UpperCAmelCase ) and isinstance(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_5_5
return [chr(ord(_UpperCAmelCase ) ^ key ) for ch in content]
def A_ ( self : Union[str, Any], _UpperCAmelCase : str, _UpperCAmelCase : int = 0 ) -> str:
"""simple docstring"""
assert isinstance(_UpperCAmelCase, _UpperCAmelCase ) and isinstance(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = key or self.__key or 1
# make sure key can be any size
while key > 2_5_5:
key -= 2_5_5
# This will be returned
SCREAMING_SNAKE_CASE__ : List[Any] = ""
for ch in content:
ans += chr(ord(_UpperCAmelCase ) ^ key )
return ans
def A_ ( self : Dict, _UpperCAmelCase : str, _UpperCAmelCase : int = 0 ) -> str:
"""simple docstring"""
assert isinstance(_UpperCAmelCase, _UpperCAmelCase ) and isinstance(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = key or self.__key or 1
# make sure key can be any size
while key > 2_5_5:
key -= 2_5_5
# This will be returned
SCREAMING_SNAKE_CASE__ : Tuple = ""
for ch in content:
ans += chr(ord(_UpperCAmelCase ) ^ key )
return ans
def A_ ( self : Tuple, _UpperCAmelCase : str, _UpperCAmelCase : int = 0 ) -> bool:
"""simple docstring"""
assert isinstance(_UpperCAmelCase, _UpperCAmelCase ) and isinstance(_UpperCAmelCase, _UpperCAmelCase )
try:
with open(_UpperCAmelCase ) as fin, open("encrypt.out", "w+" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(_UpperCAmelCase, _UpperCAmelCase ) )
except OSError:
return False
return True
def A_ ( self : Tuple, _UpperCAmelCase : str, _UpperCAmelCase : int ) -> bool:
"""simple docstring"""
assert isinstance(_UpperCAmelCase, _UpperCAmelCase ) and isinstance(_UpperCAmelCase, _UpperCAmelCase )
try:
with open(_UpperCAmelCase ) as fin, open("decrypt.out", "w+" ) as fout:
                # actual decrypt-process
for line in fin:
fout.write(self.decrypt_string(_UpperCAmelCase, _UpperCAmelCase ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
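# Hedged round-trip check, assuming the class above is exposed as XORCipher
# (matching the commented tests): XOR with the same key is an involution, so
# decrypting an encrypted string returns the original.
# crypt = XORCipher()
# assert crypt.decrypt_string(crypt.encrypt_string("hallo welt", 67), 67) == "hallo welt"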
| 191
|
import qiskit
def _a ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> qiskit.result.counts.Counts:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = qiskit.Aer.get_backend("aer_simulator" )
# Create a Quantum Circuit acting on the q register
SCREAMING_SNAKE_CASE__ : List[Any] = qiskit.QuantumCircuit(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
    # Execute the circuit on the aer simulator
SCREAMING_SNAKE_CASE__ : int = qiskit.execute(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , shots=10_00 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
_lowerCamelCase : List[str] = single_qubit_measure(2, 2)
print(f"Total count for various states are: {counts}")
| 191
| 1
|
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : str = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
__lowerCamelCase : Optional[int] = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : List[str] = AudioClassificationPipeline(model=lowerCamelCase__, feature_extractor=lowerCamelCase__ )
# test with a raw waveform
A : Any = np.zeros((3_4000,) )
A : Any = np.zeros((1_4000,) )
return audio_classifier, [audioa, audio]
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ ):
A , A : List[Any] = examples
A : Optional[Any] = audio_classifier(lowerCamelCase__ )
# by default a model is initialized with num_labels=2
self.assertEqual(
lowerCamelCase__, [
{"""score""": ANY(lowerCamelCase__ ), """label""": ANY(lowerCamelCase__ )},
{"""score""": ANY(lowerCamelCase__ ), """label""": ANY(lowerCamelCase__ )},
], )
A : Tuple = audio_classifier(lowerCamelCase__, top_k=1 )
self.assertEqual(
lowerCamelCase__, [
{"""score""": ANY(lowerCamelCase__ ), """label""": ANY(lowerCamelCase__ )},
], )
self.run_torchaudio(lowerCamelCase__ )
@require_torchaudio
def _lowerCAmelCase ( self, lowerCamelCase__ ):
import datasets
        # test with an audio example loaded from a dataset
A : List[Any] = datasets.load_dataset("""hf-internal-testing/librispeech_asr_dummy""", """clean""", split="""validation""" )
A : Optional[int] = dataset[0]["""audio"""]["""array"""]
A : int = audio_classifier(lowerCamelCase__ )
self.assertEqual(
lowerCamelCase__, [
{"""score""": ANY(lowerCamelCase__ ), """label""": ANY(lowerCamelCase__ )},
{"""score""": ANY(lowerCamelCase__ ), """label""": ANY(lowerCamelCase__ )},
], )
@require_torch
def _lowerCAmelCase ( self ):
A : Optional[int] = """anton-l/wav2vec2-random-tiny-classifier"""
A : str = pipeline("""audio-classification""", model=lowerCamelCase__ )
A : Any = np.ones((8000,) )
A : List[Any] = audio_classifier(lowerCamelCase__, top_k=4 )
A : int = [
{"""score""": 0.0842, """label""": """no"""},
{"""score""": 0.0838, """label""": """up"""},
{"""score""": 0.0837, """label""": """go"""},
{"""score""": 0.0834, """label""": """right"""},
]
A : Union[str, Any] = [
{"""score""": 0.0845, """label""": """stop"""},
{"""score""": 0.0844, """label""": """on"""},
{"""score""": 0.0841, """label""": """right"""},
{"""score""": 0.0834, """label""": """left"""},
]
self.assertIn(nested_simplify(lowerCamelCase__, decimals=4 ), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
A : str = {"""array""": np.ones((8000,) ), """sampling_rate""": audio_classifier.feature_extractor.sampling_rate}
A : str = audio_classifier(lowerCamelCase__, top_k=4 )
self.assertIn(nested_simplify(lowerCamelCase__, decimals=4 ), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def _lowerCAmelCase ( self ):
import datasets
A : Tuple = """superb/wav2vec2-base-superb-ks"""
A : Optional[int] = pipeline("""audio-classification""", model=lowerCamelCase__ )
A : Optional[Any] = datasets.load_dataset("""anton-l/superb_dummy""", """ks""", split="""test""" )
A : List[str] = np.array(dataset[3]["""speech"""], dtype=np.floataa )
A : List[str] = audio_classifier(lowerCamelCase__, top_k=4 )
self.assertEqual(
nested_simplify(lowerCamelCase__, decimals=3 ), [
{"""score""": 0.981, """label""": """go"""},
{"""score""": 0.007, """label""": """up"""},
{"""score""": 0.006, """label""": """_unknown_"""},
{"""score""": 0.001, """label""": """down"""},
], )
@require_tf
@unittest.skip("""Audio classification is not implemented for TF""" )
def _lowerCAmelCase ( self ):
pass
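# Hedged usage sketch outside the test harness (model name taken from the slow
# test above; downloads weights on first run):
# from transformers import pipeline
# import numpy as np
# clf = pipeline("audio-classification", model="superb/wav2vec2-base-superb-ks")
# print(clf(np.zeros(16_000, dtype=np.float32), top_k=2))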
| 116
|
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
SCREAMING_SNAKE_CASE_:Any = [
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"""
""" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"""
""" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.""",
"""The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"""
""" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"""
""" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"""
""" body.""",
"""Amnesty International releases its annual report on the death penalty. The report catalogs the use of"""
""" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"""
""" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"""
""" punishment.""",
]
SCREAMING_SNAKE_CASE_:Optional[int] = [
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."""
""" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"""
""" had informed his Lufthansa training school of an episode of severe depression, airline says .""",
"""Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."""
""" Israel and the United States opposed the move, which could open the door to war crimes investigations against"""
""" Israelis .""",
"""Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"""
""" death . Organization claims that governments around the world are using the threat of terrorism to advance"""
""" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"""
""" sentences up by 28% .""",
]
def __UpperCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
A : Tuple = calculate_rouge(_lowerCAmelCase , _lowerCAmelCase , bootstrap_aggregation=_lowerCAmelCase , rouge_keys=["""rouge2""", """rougeL"""] )
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
A : Tuple = calculate_rouge(_lowerCAmelCase , _lowerCAmelCase , bootstrap_aggregation=_lowerCAmelCase , rouge_keys=["""rouge2"""] )
assert (
pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean()
)
def __UpperCamelCase ( ) -> List[Any]:
"""simple docstring"""
A : Dict = """rougeLsum"""
A : str = calculate_rouge(_lowerCAmelCase , _lowerCAmelCase , newline_sep=_lowerCAmelCase , rouge_keys=[k] )[k]
A : Tuple = calculate_rouge(_lowerCAmelCase , _lowerCAmelCase , newline_sep=_lowerCAmelCase , rouge_keys=[k] )[k]
assert score > score_no_sep
def __UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
A : Union[str, Any] = ["""rouge1""", """rouge2""", """rougeL"""]
A : Dict = calculate_rouge(_lowerCAmelCase , _lowerCAmelCase , newline_sep=_lowerCAmelCase , rouge_keys=_lowerCAmelCase )
A : List[str] = calculate_rouge(_lowerCAmelCase , _lowerCAmelCase , newline_sep=_lowerCAmelCase , rouge_keys=_lowerCAmelCase )
assert score_sep == score_no_sep
def __UpperCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
A : Optional[Any] = [
"""Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
]
A : Optional[Any] = [
"""Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
""" the final seconds on board Flight 9525.""",
]
assert calculate_rouge(_lowerCAmelCase , _lowerCAmelCase , newline_sep=_lowerCAmelCase ) == calculate_rouge(_lowerCAmelCase , _lowerCAmelCase , newline_sep=_lowerCAmelCase )
def __UpperCamelCase ( ) -> List[Any]:
"""simple docstring"""
A : Tuple = [
"""\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
]
A : Union[str, Any] = [
""" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
]
A : int = calculate_rouge(_lowerCAmelCase , _lowerCAmelCase , rouge_keys=["""rougeLsum"""] , newline_sep=_lowerCAmelCase )["""rougeLsum"""]
A : Optional[Any] = calculate_rouge(_lowerCAmelCase , _lowerCAmelCase , rouge_keys=["""rougeLsum"""] )["""rougeLsum"""]
assert new_score > prev_score
def __UpperCamelCase ( ) -> Any:
"""simple docstring"""
A : Tuple = Path("""examples/seq2seq/test_data/wmt_en_ro""" )
A : Optional[Any] = calculate_rouge_path(data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) )
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
A : List[Any] = calculate_rouge_path(
data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) , bootstrap_aggregation=_lowerCAmelCase )
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
| 116
| 1
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
snake_case__ : List[str] = logging.getLogger(__name__)
@dataclass
class A_ :
lowerCAmelCase__ = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
lowerCAmelCase__ = field(
default=_lowerCamelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowerCAmelCase__ = field(
default=_lowerCamelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
lowerCAmelCase__ = field(
default=_lowerCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
    lowerCAmelCase__ = field(default=_lowerCamelCase , metadata={"""help""": """Whether to freeze the encoder."""} )
lowerCAmelCase__ = field(default=_lowerCamelCase , metadata={"""help""": """Whether to freeze the embeddings."""} )
@dataclass
class A_ :
lowerCAmelCase__ = field(
metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
lowerCAmelCase__ = field(
default="""summarization""" , metadata={"""help""": """Task name, summarization (or summarization_{dataset} for pegasus) or translation"""} , )
lowerCAmelCase__ = field(
default=1024 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
lowerCAmelCase__ = field(
default=128 , metadata={
"""help""": (
"""The maximum total sequence length for target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
lowerCAmelCase__ = field(
default=142 , metadata={
"""help""": (
"""The maximum total sequence length for validation target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded. """
"""This argument is also used to override the ``max_length`` param of ``model.generate``, which is used """
"""during ``evaluate`` and ``predict``."""
)
} , )
lowerCAmelCase__ = field(
default=142 , metadata={
"""help""": (
"""The maximum total sequence length for test target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
lowerCAmelCase__ = field(default=-1 , metadata={"""help""": """# training examples. -1 means use all."""} )
lowerCAmelCase__ = field(default=-1 , metadata={"""help""": """# validation examples. -1 means use all."""} )
lowerCAmelCase__ = field(default=-1 , metadata={"""help""": """# test examples. -1 means use all."""} )
lowerCAmelCase__ = field(default=_lowerCamelCase , metadata={"""help""": """Source language id for translation."""} )
lowerCAmelCase__ = field(default=_lowerCamelCase , metadata={"""help""": """Target language id for translation."""} )
lowerCAmelCase__ = field(default=_lowerCamelCase , metadata={"""help""": """# num_beams to use for evaluation."""} )
lowerCAmelCase__ = field(
default=_lowerCamelCase , metadata={"""help""": """If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."""} , )
def _a ( lowerCamelCase: Dict , lowerCamelCase: str , lowerCamelCase: Optional[Any] ) -> Optional[int]:
'''simple docstring'''
logger.info(F"""***** {split} metrics *****""" )
for key in sorted(metrics.keys() ):
logger.info(F""" {key} = {metrics[key]}""" )
save_json(lowerCamelCase , os.path.join(lowerCamelCase , F"""{split}_results.json""" ) )
def _a ( ) -> List[str]:
'''simple docstring'''
__A = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__A , __A , __A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__A , __A , __A = parser.parse_args_into_dataclasses()
check_output_dir(lowerCamelCase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , lowerCamelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__A = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__A = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
assert hasattr(lowerCamelCase , lowerCamelCase ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(lowerCamelCase , lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase ) )
__A = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__A = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=lowerCamelCase , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(lowerCamelCase , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
__A = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(lowerCamelCase , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(lowerCamelCase , lowerCamelCase ):
__A = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
__A = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(lowerCamelCase )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
__A = SeqaSeqDataset
# Get datasets
__A = (
dataset_class(
lowerCamelCase , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_train
else None
)
__A = (
dataset_class(
lowerCamelCase , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
__A = (
dataset_class(
lowerCamelCase , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
__A = (
build_compute_metrics_fn(data_args.task , lowerCamelCase ) if training_args.predict_with_generate else None
)
__A = SeqaSeqTrainer(
model=lowerCamelCase , args=lowerCamelCase , data_args=lowerCamelCase , train_dataset=lowerCamelCase , eval_dataset=lowerCamelCase , data_collator=SeqaSeqDataCollator(
lowerCamelCase , lowerCamelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=lowerCamelCase , tokenizer=lowerCamelCase , )
__A = {}
# Training
if training_args.do_train:
logger.info('''*** Train ***''' )
__A = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
__A = train_result.metrics
__A = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('''train''' , lowerCamelCase , training_args.output_dir )
all_metrics.update(lowerCamelCase )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__A = trainer.evaluate(metric_key_prefix='''val''' )
__A = data_args.n_val
__A = round(metrics['''val_loss'''] , 4 )
if trainer.is_world_process_zero():
handle_metrics('''val''' , lowerCamelCase , training_args.output_dir )
all_metrics.update(lowerCamelCase )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
__A = trainer.predict(test_dataset=lowerCamelCase , metric_key_prefix='''test''' )
__A = test_output.metrics
__A = data_args.n_test
if trainer.is_world_process_zero():
__A = round(metrics['''test_loss'''] , 4 )
handle_metrics('''test''' , lowerCamelCase , training_args.output_dir )
all_metrics.update(lowerCamelCase )
if training_args.predict_with_generate:
__A = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=lowerCamelCase , clean_up_tokenization_spaces=lowerCamelCase )
__A = lmap(str.strip , lowerCamelCase )
write_txt_file(lowerCamelCase , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
if trainer.is_world_process_zero():
save_json(lowerCamelCase , os.path.join(training_args.output_dir , '''all_results.json''' ) )
return all_metrics
def _a ( lowerCamelCase: int ) -> Union[str, Any]:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
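# Hedged invocation sketch (the script name, data path, and model id are only
# examples, not taken from this file):
# python finetune_trainer.py \
#     --model_name_or_path sshleifer/tiny-mbart \
#     --data_dir ./wmt_en_ro \
#     --output_dir ./output \
#     --do_train --do_eval --predict_with_generate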
| 250
|
def _a ( lowerCamelCase: str ) -> bool:
'''simple docstring'''
    __A = [int(i ) for i in ip_va_address.split('''.''' ) if i.isdigit()]
    return len(octets ) == 4 and all(0 <= int(octet ) <= 2_55 for octet in octets )
if __name__ == "__main__":
snake_case__ : Union[str, Any] = input().strip()
snake_case__ : Any = 'valid' if is_ip_va_address_valid(ip) else 'invalid'
print(f'{ip} is a {valid_or_invalid} IP v4 address.')
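# Hedged examples of the intended behavior of the validator above:
# is_ip_va_address_valid("192.168.0.23")  -> True
# is_ip_va_address_valid("192.168.256.1") -> False  (octet out of range)
# is_ip_va_address_valid("39.207.43")     -> False  (only three octets)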
| 250
| 1
|
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def snake_case_ ( lowerCAmelCase_ : List[Any] ): # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def snake_case_ ( ):
with parallel_backend("""spark""" ):
assert ParallelBackendConfig.backend_name == "spark"
__lowercase : List[Any] = [1, 2, 3]
with pytest.raises(lowerCAmelCase_ ):
with parallel_backend("""unsupported backend""" ):
map_nested(lowerCAmelCase_ , lowerCAmelCase_ , num_proc=2 )
with pytest.raises(lowerCAmelCase_ ):
with parallel_backend("""unsupported backend""" ):
map_nested(lowerCAmelCase_ , lowerCAmelCase_ , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("""num_proc""" , [2, -1] )
def snake_case_ ( lowerCAmelCase_ : Union[str, Any] ):
__lowercase : List[str] = [1, 2]
__lowercase : Optional[Any] = {"""a""": 1, """b""": 2}
__lowercase : str = {"""a""": [1, 2], """b""": [3, 4]}
__lowercase : Tuple = {"""a""": {"""1""": 1}, """b""": 2}
__lowercase : Optional[int] = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
__lowercase : int = [2, 3]
__lowercase : Union[str, Any] = {"""a""": 2, """b""": 3}
__lowercase : List[str] = {"""a""": [2, 3], """b""": [4, 5]}
__lowercase : Optional[Any] = {"""a""": {"""1""": 2}, """b""": 3}
__lowercase : Any = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
with parallel_backend("""spark""" ):
assert map_nested(lowerCAmelCase_ , lowerCAmelCase_ , num_proc=lowerCAmelCase_ ) == expected_map_nested_sa
assert map_nested(lowerCAmelCase_ , lowerCAmelCase_ , num_proc=lowerCAmelCase_ ) == expected_map_nested_sa
assert map_nested(lowerCAmelCase_ , lowerCAmelCase_ , num_proc=lowerCAmelCase_ ) == expected_map_nested_sa
assert map_nested(lowerCAmelCase_ , lowerCAmelCase_ , num_proc=lowerCAmelCase_ ) == expected_map_nested_sa
assert map_nested(lowerCAmelCase_ , lowerCAmelCase_ , num_proc=lowerCAmelCase_ ) == expected_map_nested_sa
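# Hedged usage sketch mirroring the tests above (requires the joblibspark
# backend; `add_one` must live at module level to stay picklable):
# from datasets.parallel import parallel_backend
# from datasets.utils.py_utils import map_nested
#
# def add_one(i):
#     return i + 1
#
# with parallel_backend("spark"):
#     assert map_nested(add_one, [1, 2, 3], num_proc=2) == [2, 3, 4]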
| 233
|
import math
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Tuple , __a : int=0 ) -> Optional[Any]: # a graph with Node 0,1,...,N-1
"""simple docstring"""
__lowercase : Any = n
__lowercase : Optional[int] = [
[math.inf for j in range(0 , __a )] for i in range(0 , __a )
] # adjacency matrix for weight
__lowercase : Dict = [
[math.inf for j in range(0 , __a )] for i in range(0 , __a )
] # dp[i][j] stores minimum distance from i to j
def lowerCAmelCase ( self : int , __a : Optional[int] , __a : Tuple , __a : Dict ) -> Dict:
"""simple docstring"""
__lowercase : Any = w
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
__lowercase : List[str] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def lowerCAmelCase ( self : Any , __a : Any , __a : str ) -> int:
"""simple docstring"""
return self.dp[u][v]
if __name__ == "__main__":
lowerCamelCase : int = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
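    # Hedged checks for the demo above: the shortest 1 -> 4 route is
    # 1 -> 3 -> 4 (5 + 6 = 11) and the shortest 0 -> 3 route is
    # 0 -> 2 -> 3 (9 + 7 = 16).
    assert graph.show_min(1, 4) == 11
    assert graph.show_min(0, 3) == 16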
| 233
| 1
|
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
_lowerCAmelCase : int = numpy.array([0, 0])
_lowerCAmelCase : Dict = numpy.array([0.5, 0.8_660_254])
_lowerCAmelCase : str = numpy.array([1, 0])
_lowerCAmelCase : Union[str, Any] = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> list[numpy.ndarray]:
'''simple docstring'''
_lowerCamelCase : Dict = initial_vectors
for _ in range(_lowerCamelCase ):
_lowerCamelCase : int = iteration_step(_lowerCamelCase )
return vectors
def lowerCamelCase_( _lowerCamelCase ) -> list[numpy.ndarray]:
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = []
for i, start_vector in enumerate(vectors[:-1] ):
_lowerCamelCase : Union[str, Any] = vectors[i + 1]
new_vectors.append(_lowerCamelCase )
_lowerCamelCase : Optional[int] = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> numpy.ndarray:
'''simple docstring'''
_lowerCamelCase : int = numpy.radians(_lowerCamelCase )
_lowerCamelCase, _lowerCamelCase : Optional[Any] = numpy.cos(_lowerCamelCase ), numpy.sin(_lowerCamelCase )
_lowerCamelCase : str = numpy.array(((c, -s), (s, c)) )
return numpy.dot(_lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase ) -> None:
'''simple docstring'''
_lowerCamelCase : Optional[Any] = plt.gca()
axes.set_aspect("equal" )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = zip(*_lowerCamelCase )
plt.plot(_lowerCamelCase , _lowerCamelCase )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase : Any = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
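    # Hedged check: each iteration step replaces every segment with four
    # shorter ones, so after n iterations the list holds 3 * 4**n + 1 vectors.
    assert len(iterate(INITIAL_VECTORS, 2)) == 3 * 4**2 + 1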
| 340
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : Optional[Any] = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Dict = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
_lowerCAmelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 340
| 1
|
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
assert isinstance(lowerCAmelCase , lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[Any] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : List[str] = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = TextDatasetReader(lowerCAmelCase , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , split=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
if issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = text_path
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [text_path]
SCREAMING_SNAKE_CASE_ : int = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[int] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
def _snake_case ( lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : List[str]=("train",) ):
"""simple docstring"""
assert isinstance(lowerCAmelCase , lowerCAmelCase )
for split in splits:
SCREAMING_SNAKE_CASE_ : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[Any] = TextDatasetReader({"train": text_path} , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = tmp_path / "cache"
    # the text reader always yields a single "text" column with dtype "string"
SCREAMING_SNAKE_CASE_ : Tuple = {"text": "string"}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : Dict = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader({"train": text_path} , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : Dict ):
"""simple docstring"""
if split:
SCREAMING_SNAKE_CASE_ : Optional[int] = {split: text_path}
else:
SCREAMING_SNAKE_CASE_ : List[Any] = "train"
SCREAMING_SNAKE_CASE_ : Tuple = {"train": text_path, "test": text_path}
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
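# Hedged usage sketch mirroring the readers exercised above (the file path is
# a placeholder): load a plain-text file as a one-column "text" dataset.
# from datasets import load_dataset
# ds = load_dataset("text", data_files={"train": "my_corpus.txt"})["train"]
# assert ds.column_names == ["text"]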
| 18
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase : int = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : int = ["GLPNFeatureExtractor"]
lowerCamelCase : Optional[int] = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Union[str, Any] = [
"GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
"GLPNForDepthEstimation",
"GLPNLayer",
"GLPNModel",
"GLPNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
lowerCamelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 47
| 0
|
_UpperCAmelCase = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def UpperCamelCase ( ):
'''simple docstring'''
A_ : Tuple = input('Enter message: ' )
A_ : int = input('Enter key [alphanumeric]: ' )
A_ : Optional[Any] = input('Encrypt/Decrypt [e/d]: ' )
if mode.lower().startswith('e' ):
A_ : List[Any] = 'encrypt'
A_ : int = encrypt_message(__lowercase , __lowercase )
elif mode.lower().startswith('d' ):
A_ : Optional[Any] = 'decrypt'
A_ : Dict = decrypt_message(__lowercase , __lowercase )
print(f'''\n{mode.title()}ed message:''' )
print(__lowercase )
def UpperCamelCase ( __lowercase : str , __lowercase : str ):
'''simple docstring'''
return translate_message(__lowercase , __lowercase , 'encrypt' )
def UpperCamelCase ( __lowercase : str , __lowercase : str ):
'''simple docstring'''
return translate_message(__lowercase , __lowercase , 'decrypt' )
def UpperCamelCase ( __lowercase : str , __lowercase : str , __lowercase : str ):
'''simple docstring'''
A_ : Tuple = []
A_ : str = 0
A_ : Optional[int] = key.upper()
for symbol in message:
A_ : Optional[Any] = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(__lowercase )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(__lowercase ):
A_ : str = 0
else:
translated.append(__lowercase )
return "".join(__lowercase )
if __name__ == "__main__":
main()
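# Hedged round-trip sketch, assuming the helpers keep the names used in main
# above (encrypt_message / decrypt_message, key first): Vigenère decryption
# with the same key inverts encryption, leaving case and non-letters untouched.
# assert decrypt_message("LEMON", encrypt_message("LEMON", "Attack at dawn!")) == "Attack at dawn!"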
| 357
|
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
_UpperCAmelCase = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def UpperCamelCase ( __lowercase : Optional[Any] ,__lowercase : Tuple ,__lowercase : str ,__lowercase : List[Any] ,__lowercase : Optional[int] ):
'''simple docstring'''
for attribute in key.split('.' ):
A_ : List[Any] = getattr(__lowercase ,__lowercase )
if weight_type is not None:
A_ : Dict = getattr(__lowercase ,__lowercase ).shape
else:
A_ : Union[str, Any] = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
A_ : Dict = value
elif weight_type == "weight_g":
A_ : str = value
elif weight_type == "weight_v":
A_ : int = value
elif weight_type == "bias":
A_ : int = value
else:
A_ : List[Any] = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def UpperCamelCase ( __lowercase : Optional[Any] ,__lowercase : Union[str, Any] ):
'''simple docstring'''
A_ : List[Any] = []
A_ : int = fairseq_model.state_dict()
A_ : Optional[Any] = hf_model.feature_extractor
A_ : List[str] = hf_model.adapter
for name, value in fairseq_dict.items():
A_ : Tuple = False
if "conv_layers" in name:
load_conv_layer(
__lowercase ,__lowercase ,__lowercase ,__lowercase ,hf_model.config.feat_extract_norm == 'group' ,)
A_ : Optional[Any] = True
elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.'] ):
load_adapter(__lowercase ,__lowercase ,__lowercase ,__lowercase )
A_ : Union[str, Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
A_ : Tuple = True
if "*" in mapped_key:
A_ : Optional[Any] = name.split(__lowercase )[0].split('.' )[-2]
A_ : List[Any] = mapped_key.replace('*' ,__lowercase )
if "weight_g" in name:
A_ : Optional[int] = 'weight_g'
elif "weight_v" in name:
A_ : Union[str, Any] = 'weight_v'
elif "bias" in name:
A_ : Any = 'bias'
elif "weight" in name:
A_ : str = 'weight'
else:
A_ : Optional[Any] = None
set_recursively(__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase )
continue
if not is_used:
unused_weights.append(__lowercase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def UpperCamelCase ( __lowercase : Dict ,__lowercase : List[Any] ,__lowercase : Tuple ,__lowercase : Dict ,__lowercase : Any ):
'''simple docstring'''
A_ : List[Any] = full_name.split('conv_layers.' )[-1]
A_ : Optional[int] = name.split('.' )
A_ : Tuple = int(items[0] )
A_ : int = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
A_ : Union[str, Any] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
A_ : str = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
A_ : List[str] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
A_ : List[Any] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__lowercase )
def UpperCamelCase ( __lowercase : Any ,__lowercase : Tuple ,__lowercase : Optional[Any] ,__lowercase : Union[str, Any] ):
'''simple docstring'''
A_ : Union[str, Any] = full_name.split('adaptor.' )[-1]
A_ : List[Any] = name.split('.' )
if items[1].isdigit():
A_ : Union[str, Any] = int(items[1] )
else:
A_ : Tuple = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
A_ : Dict = value
logger.info(f'''Adapter proj layer norm bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
A_ : int = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
A_ : Dict = value
logger.info(f'''Adapter proj layer bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
A_ : Tuple = value
logger.info(f'''Adapter proj layer weight was initialized from {full_name}.''' )
elif isinstance(__lowercase ,__lowercase ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
A_ : Tuple = value
logger.info(f'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
A_ : str = value
            logger.info(f'''Adapter layer {layer_id} weight was initialized from {full_name}.''' )
else:
unused_weights.append(__lowercase )
def UpperCamelCase ( __lowercase : List[Any] ):
'''simple docstring'''
A_ , A_ : Any = emb.weight.shape
A_ : Tuple = nn.Linear(__lowercase ,__lowercase ,bias=__lowercase )
A_ : Optional[Any] = emb.weight.data
return lin_layer
@torch.no_grad()
def UpperCamelCase ( __lowercase : Any ,__lowercase : Optional[int] ,__lowercase : Any ,__lowercase : str ,__lowercase : Dict ,__lowercase : Dict ,__lowercase : Tuple ,__lowercase : Optional[int] ,__lowercase : List[str] ,__lowercase : List[Any] ,__lowercase : str ,):
'''simple docstring'''
A_ : Optional[int] = WavaVecaConfig.from_pretrained(
__lowercase ,add_adapter=__lowercase ,adapter_stride=__lowercase ,adapter_kernel_size=__lowercase ,use_auth_token=__lowercase ,output_hidden_size=__lowercase ,)
A_ : Any = MBartConfig.from_pretrained(__lowercase )
# load model
A_ , A_ , A_ : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={
'config_yaml': config_yaml_path,
'data': '/'.join(dict_path.split('/' )[:-1] ),
'w2v_path': checkpoint_path,
'load_pretrained_decoder_from': None,
} ,)
A_ : Union[str, Any] = model[0].eval()
# load feature extractor
A_ : Any = WavaVecaFeatureExtractor.from_pretrained(__lowercase ,use_auth_token=__lowercase )
# set weights for wav2vec2 encoder
A_ : Optional[Any] = WavaVecaModel(__lowercase )
recursively_load_weights_wavaveca(model.encoder ,__lowercase )
# load decoder weights
A_ : Dict = MBartForCausalLM(__lowercase )
A_ , A_ : Union[str, Any] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() ,strict=__lowercase )
logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
A_ : Optional[int] = SpeechEncoderDecoderModel(encoder=__lowercase ,decoder=__lowercase )
A_ : Any = False
A_ : List[Any] = MBartaaTokenizer(__lowercase )
tokenizer.save_pretrained(__lowercase )
A_ : Dict = hf_wavavec.config.to_dict()
A_ : Any = tokenizer.pad_token_id
A_ : Optional[Any] = tokenizer.bos_token_id
A_ : Union[str, Any] = tokenizer.eos_token_id
A_ : Dict = 'mbart50'
A_ : str = 'wav2vec2'
A_ : int = tokenizer.eos_token_id
A_ : List[str] = 25_00_04
A_ : int = tokenizer.eos_token_id
A_ : Optional[Any] = SpeechEncoderDecoderConfig.from_dict(__lowercase )
hf_wavavec.save_pretrained(__lowercase )
feature_extractor.save_pretrained(__lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=1024, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=250004, type=int, help="""`decoder_start_token_id` of model config""")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 192
| 0
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
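# Illustrative example of the renaming above (key is hypothetical):
#   "backbone.patch_embed1.proj.weight" -> "segformer.encoder.patch_embeddings.0.proj.weight"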
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]
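# Minimal sketch of the split performed above on a toy tensor (hidden size 2
# is assumed; shapes only, not real weights):
#
#     kv_weight = torch.randn(4, 2)                         # fused (2 * hidden, hidden) matrix
#     key_w, value_w = kv_weight[:2, :], kv_weight[2:, :]   # first half -> key, second half -> value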
def prepare_img():
    # fetch a test image from COCO
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = SegformerConfig()
    encoder_only = False

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer.") : len("segformer.") + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f"Model {model_name} not supported")
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        raise ValueError(f"Model {model_name} not supported")

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "b0":
        pass
    elif size == "b1":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == "b2":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"]

    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.5_820, -8.7_231, -8.3_215], [-8.0_600, -10.3_529, -10.0_304], [-7.5_208, -9.4_103, -9.6_239]],
[[-12.6_918, -13.8_994, -13.7_137], [-13.3_196, -15.7_523, -15.4_789], [-12.9_343, -14.8_757, -14.9_689]],
[[-11.1_911, -11.9_421, -11.3_243], [-11.3_342, -13.6_839, -13.3_581], [-10.3_909, -12.1_832, -12.4_858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-11.8_173, -14.3_850, -16.3_128], [-14.5_648, -16.5_804, -18.6_568], [-14.7_223, -15.7_387, -18.4_218]],
[[-15.7_290, -17.9_171, -19.4_423], [-18.3_105, -19.9_448, -21.4_661], [-17.9_296, -18.6_497, -20.7_910]],
[[-15.0_783, -17.0_336, -18.2_789], [-16.8_771, -18.6_870, -20.1_612], [-16.2_454, -17.1_426, -19.5_055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.0_878, -10.2_081, -10.1_891], [-9.3_144, -10.7_941, -10.9_843], [-9.2_294, -10.3_855, -10.5_704]],
[[-12.2_316, -13.9_068, -13.6_102], [-12.9_161, -14.3_702, -14.3_235], [-12.5_233, -13.7_174, -13.7_932]],
[[-14.6_275, -15.2_490, -14.9_727], [-14.3_400, -15.9_687, -16.2_827], [-14.1_484, -15.4_033, -15.8_937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-12.3_144, -13.2_447, -14.0_802], [-13.3_614, -14.5_816, -15.6_117], [-13.3_340, -14.4_433, -16.2_219]],
[[-19.2_781, -20.4_128, -20.7_506], [-20.6_153, -21.6_566, -22.0_998], [-19.9_800, -21.0_430, -22.1_494]],
[[-18.8_739, -19.7_804, -21.1_834], [-20.1_233, -21.6_765, -23.2_944], [-20.0_315, -21.2_641, -23.6_944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.5_524, -12.0_835, -11.7_348], [-10.5_229, -13.6_446, -14.5_662], [-9.5_842, -12.8_851, -13.9_414]],
[[-15.3_432, -17.5_323, -17.0_818], [-16.3_330, -18.9_255, -19.2_101], [-15.1_340, -17.7_848, -18.3_971]],
[[-12.6_072, -14.9_486, -14.6_631], [-13.7_629, -17.0_907, -17.7_745], [-12.7_899, -16.1_695, -17.1_671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.9_295, -13.4_057, -14.8_106], [-13.3_431, -14.8_179, -15.3_781], [-14.2_836, -15.5_942, -16.1_588]],
[[-11.4_906, -12.8_067, -13.6_564], [-13.1_189, -14.0_500, -14.1_543], [-13.8_748, -14.5_136, -14.8_789]],
[[0.5_374, 0.1_067, -0.4_742], [0.1_141, -0.2_255, -0.7_099], [-0.3_000, -0.5_924, -1.3_105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.8_217, -9.8_767, -10.1_717], [-9.4_438, -10.9_058, -11.4_047], [-9.7_939, -12.3_495, -12.1_079]],
[[-7.1_514, -9.5_336, -10.0_860], [-9.7_776, -11.6_822, -11.8_439], [-10.1_411, -12.7_655, -12.8_972]],
[[0.3_021, 0.0_805, -0.2_310], [-0.0_328, -0.1_605, -0.2_714], [-0.1_408, -0.5_477, -0.6_976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
__lowerCamelCase = torch.tensor(
[
[
[-1.1_3_7_2E0_1, -1.2_7_8_7E0_1, -1.3_4_7_7E0_1],
[-1.2_5_3_6E0_1, -1.4_1_9_4E0_1, -1.4_4_0_9E0_1],
[-1.3_2_1_7E0_1, -1.4_8_8_8E0_1, -1.5_3_2_7E0_1],
],
[
[-1.4_7_9_1E0_1, -1.7_1_2_2E0_1, -1.8_2_7_7E0_1],
[-1.7_1_6_3E0_1, -1.9_1_9_2E0_1, -1.9_5_3_3E0_1],
[-1.7_8_9_7E0_1, -1.9_9_9_1E0_1, -2.0_3_1_5E0_1],
],
[
[7.6_7_2_3E-0_1, 4.1_9_2_1E-0_1, -7.7_8_7_8E-0_2],
[4.7_7_7_2E-0_1, 9.5_5_5_7E-0_3, -2.8_0_8_2E-0_1],
[3.6_0_3_2E-0_1, -2.4_8_2_6E-0_1, -5.1_1_6_8E-0_1],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.4_959, -11.3_087, -11.7_479], [-11.0_025, -12.6_540, -12.3_319], [-11.4_064, -13.0_487, -12.9_905]],
[[-9.8_905, -11.3_084, -12.0_854], [-11.1_726, -12.7_698, -12.9_583], [-11.5_985, -13.3_278, -14.1_774]],
[[0.2_213, 0.0_192, -0.2_466], [-0.1_731, -0.4_213, -0.4_874], [-0.3_126, -0.6_541, -1.1_389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-16.0_976, -16.4_856, -17.3_962], [-16.6_234, -19.0_342, -19.7_685], [-16.0_900, -18.0_661, -19.1_180]],
[[-18.4_750, -18.8_488, -19.5_074], [-19.4_030, -22.1_570, -22.5_977], [-19.1_191, -20.8_486, -22.3_783]],
[[-4.5_178, -5.5_037, -6.5_109], [-5.0_884, -7.2_174, -8.0_334], [-4.4_156, -5.8_117, -7.2_970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-14.2_081, -14.4_732, -14.1_977], [-14.5_867, -16.4_423, -16.6_356], [-13.4_441, -14.9_685, -16.8_696]],
[[-14.4_576, -14.7_073, -15.0_451], [-15.0_816, -17.6_237, -17.9_873], [-14.4_213, -16.0_199, -18.5_992]],
[[-4.7_349, -4.9_588, -5.0_966], [-4.3_210, -6.9_325, -7.2_591], [-3.4_312, -4.7_484, -7.1_917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.7_737, -11.9_526, -11.3_273], [-13.6_692, -14.4_574, -13.8_878], [-13.8_937, -14.6_924, -15.9_345]],
[[-14.6_706, -14.5_330, -14.1_306], [-16.1_502, -16.8_180, -16.4_269], [-16.8_338, -17.8_939, -20.1_746]],
[[1.0_491, 0.8_289, 1.0_310], [1.1_044, 0.5_219, 0.8_055], [1.0_899, 0.6_926, 0.5_590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-12.5_641, -13.4_777, -13.0_684], [-13.9_587, -15.8_983, -16.6_557], [-13.3_109, -15.7_350, -16.3_141]],
[[-14.7_074, -15.4_352, -14.5_944], [-16.6_353, -18.1_663, -18.6_120], [-15.1_702, -18.0_329, -18.1_547]],
[[-1.7_990, -2.0_951, -1.7_784], [-2.6_397, -3.8_245, -3.9_686], [-1.5_264, -2.8_126, -2.9_316]],
] )
    else:
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])

    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='segformer.b0.512x512.ade.160k',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 12
|
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    """Generates the next generation for a given state of Game of Life."""
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
        next_generation.append(next_generation_row)
    return next_generation
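# For example, one step of the blinker turns the vertical bar into a
# horizontal one (and back on the next step):
#   new_generation(BLINKER) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]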
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    """Generates a list of images of subsequent Game of Life states."""
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save('out.gif', save_all=True, append_images=images[1:])
| 12
| 1
|
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    """Boyer-Moore string search using the bad character heuristic."""

    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Finds the rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Finds the position of the last mismatch between the pattern and the
        text window starting at `current_pos`, or -1 if the window matches."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches the pattern in the text and returns the index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
text = 'ABAABA'
pattern = 'AB'

bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
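# For the sample text/pattern above, "AB" occurs at indices 0 and 3, so this
# prints: Pattern found in following positions: [0, 3]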
| 367
|
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
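# Note: pieces missing from the toy monolingual vocab ("▁l", "à") and the
# explicit unk token all map to the <unk> id, which is why 3 repeats above.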
| 69
| 0
|
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
'''word_embeddings_layernorm.weight''',
'''word_embeddings_layernorm.bias''',
'''input_layernorm.weight''',
'''input_layernorm.bias''',
'''post_attention_layernorm.weight''',
'''post_attention_layernorm.bias''',
'''self_attention.dense.bias''',
'''mlp.dense_4h_to_h.bias''',
'''ln_f.weight''',
'''ln_f.bias''',
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
'''mlp.dense_4h_to_h.weight''',
'''self_attention.dense.weight''',
]
def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed TP/PP weights mapping to the transformers layout."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
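# Illustrative example (the shard file name is hypothetical): a key coming from
# "layer_04-model_00-model_states.pt" gets prefixed with the block index,
#   layer_name_mapping("self_attention.dense.weight", file)
#   -> "h.1.self_attention.dense.weight"   (4 - 3 == 1)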
def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
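# e.g. get_dtype_size(torch.float32) == 4 and get_dtype_size(torch.float16) == 2,
# while torch.bool is special-cased to 1/8 (one bit per element).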
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None
        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))

        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
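# The generated "<WEIGHTS_NAME>.index.json" in the sharded case above follows the
# standard transformers sharding layout, roughly (NN is a placeholder shard count):
#   {"metadata": {"total_size": ...},
#    "weight_map": {"h.0.input_layernorm.weight": "pytorch_model_00001-of-000NN.bin", ...}}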
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 306
|
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaVaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention, position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range, return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaVaModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaVaForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaVaForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaVaForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaVaForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaVaModel,
            TFDebertaVaForMaskedLM,
            TFDebertaVaForQuestionAnswering,
            TFDebertaVaForSequenceClassification,
            TFDebertaVaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaVaModel,
            "fill-mask": TFDebertaVaForMaskedLM,
            "question-answering": TFDebertaVaForQuestionAnswering,
            "text-classification": TFDebertaVaForSequenceClassification,
            "token-classification": TFDebertaVaForTokenClassification,
            "zero-shot": TFDebertaVaForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
@require_tf
class TFDebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
| 306
| 1
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(
            prompt, init_image, mask_image
        )

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 365
|
"""simple docstring"""
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = WavaVecaForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = WavaVecaForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = WavaVecaForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_saprl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = WavaVecaConfig.from_pretrained(config_path)
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
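# Hypothetical invocation (the script name and all paths are placeholders):
#   python convert_s3prl_checkpoint.py --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json --checkpoint_path ./s3prl_checkpoint.pt \
#       --model_dump_path ./converted_model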
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
    args = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 234
| 0
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : Union[str, Any] = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the
        hood we track all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
class FakeRegNetVisslWrapper(nn.Module):
    """
    Fake wrapper for vissl's RegNet models, exposing the trunk forward outputs.
    """

    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )
class NameToFromModelFuncMap(dict):
    """
    A dictionary with some additional logic to return the function that creates the correct original model.
    """

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True), None))
        else:
            val = super().__getitem__(x)
        return val
class NameToOurModelFuncMap(dict):
    """
    A dictionary with some additional logic to return the correct hugging face RegNet class reference.
    """

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]:
for from_key, to_key in keys:
UpperCamelCase : Dict = from_state_dict[from_key].clone()
print(F"""Copied key={from_key} to={to_key}""" )
return to_state_dict
def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )
    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output

    # since we don't want to use any config files, the vissl seer model doesn't actually have a head, so let's
    # just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="x" ),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type="x" ),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="x" ),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="x" ),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="x" ),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type="x" ),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type="x" ),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type="x" ),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type="x" ),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type="x" ),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type="x" ),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type="x" ),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
    # add seer weights logic

    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]
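    # Note: the ClassyVision/VISSL checkpoints fetched above are nested dicts of
    # the shape {"classy_state_dict": {"base_model": {"model": {"trunk": ...,
    # "heads": ...}}}}; "trunk" holds the backbone weights and "heads" any
    # (optional) classifier weights returned alongside the model.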
    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetYaagf()),
    )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetYaagf()),
    )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetYaaagf()),
    )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))),
    )

    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetYaagf()),
    )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetYaagf()),
    )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetYaaagf()),
    )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))),
    )
    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported regnet* architecture,"""
""" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 52
|
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
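
# Quick illustration of the pattern above (a hypothetical one-off check, not
# part of this script's flow):
#   _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#   == [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]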
def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 52
| 1
|
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factorization of n (trial division), in nondecreasing order.

    >>> prime_factors(100)
    [2, 2, 5, 5]
    >>> prime_factors(180)
    [2, 2, 3, 3, 5]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 97
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
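
# Illustrative use of the ONNX config above (hedged sketch; `task` defaults to
# "default" in the OnnxConfig base class):
#   onnx_config = CamembertOnnxConfig(CamembertConfig())
#   dict(onnx_config.inputs)  # {"input_ids": {0: "batch", 1: "sequence"},
#                             #  "attention_mask": {0: "batch", 1: "sequence"}}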
| 97
| 1
|
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
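#
# A quick sketch of how the variation dimensions expand (this mirrors the
# itertools.product call in main(); the dimension values are hypothetical):
#
#   >>> import itertools
#   >>> dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
#   >>> [" ".join(v).strip() for v in itertools.product(*dims)]
#   ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#    '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']
#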
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")
class Tee:
    """A helper that tees print output into a file while still writing to stdout."""

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """Return the original command line, wrapped to max_width with shell escapes."""
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # enable to debug this function without running the real benchmark (flip 0 to 1)
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id, cmd, variation_key, variation, longest_variation_len, target_metric_key, report_metric_keys, repeat_times, output_dir, verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
return F'''
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
'''
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=_A , floatfmt='''.2f''' )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=_A , floatfmt='''.2f''' )]
print('''\n\n'''.join(_A ) )
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd", default=None, type=str, required=True, help="Base cmd",
    )
    parser.add_argument(
        "--variations", default=None, type=str, nargs="+", required=True, help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation", default=None, type=str, help="Baseline variation to compare to. If None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key", default=None, type=str, required=True, help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys", default="", type=str, help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times", default=1, type=int, help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir", default="output_benchmark", type=str, help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose", default=False, action="store_true", help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1, cmd, variation_key, variation, longest_variation_len, args.target_metric_key, report_metric_keys, args.repeat_times, output_dir, args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
| 314
|
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit_with_args(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])
@require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
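
# Typical usage pattern exercised by the tests above (sketch; `train` is a
# hypothetical function name):
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size):
#         ...  # raise a CUDA OOM RuntimeError when batch_size does not fit
#
#     train()  # retried with 128, 64, 32, ... until it fits or reaches zero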
| 314
| 1
|
import random
def rabin_miller(num: int) -> bool:
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
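

# How rabin_miller works: num - 1 is written as 2**t * s with s odd; a random
# base `a` certifies "probably prime" when a**s == 1 (mod num) or some value in
# the repeated-squaring chain equals num - 1. A composite slips past a single
# random base with probability at most 1/4, so five rounds bound the error by
# (1/4)**5.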
def is_prime_low_num(num: int) -> bool:
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67,
        71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139,
        149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223,
        227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293,
        307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383,
        389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463,
        467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
        571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647,
        653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743,
        751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839,
        853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941,
        947, 953, 967, 971, 977, 983, 991, 997,
]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
| 335
|
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def rename_state_dict_key(k: str) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
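
# Example (hypothetical TF key): "encoder/memory_attention/output_proj/kernel"
# is renamed step by step to "encoder.encoder_attn.out_proj.weight".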
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
_UpperCamelCase = parser.parse_args()
if args.save_dir is None:
_UpperCamelCase = Path(args.tf_ckpt_path).parent.name
_UpperCamelCase = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 335
| 1
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)
@cached_property
    def ta_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")
    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
def lowercase ( self : Optional[int] ) -> int:
__lowerCAmelCase = self.ta_base_tokenizer
__lowerCAmelCase = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
__lowerCAmelCase = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def lowercase ( self : Tuple ) -> Any:
__lowerCAmelCase = self.ta_base_tokenizer
__lowerCAmelCase = 'Unicode €.'
__lowerCAmelCase = tokenizer(lowerCAmelCase_ )
__lowerCAmelCase = [8_8, 1_1_3, 1_0_8, 1_0_2, 1_1_4, 1_0_3, 1_0_4, 3_5, 2_2_9, 1_3_3, 1_7_5, 4_9, 1]
self.assertEqual(encoded['input_ids'] , lowerCAmelCase_ )
# decoding
__lowerCAmelCase = tokenizer.decode(lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , 'Unicode €.</s>' )
__lowerCAmelCase = tokenizer('e è é ê ë' )
__lowerCAmelCase = [1_0_4, 3_5, 1_9_8, 1_7_1, 3_5, 1_9_8, 1_7_2, 3_5, 1_9_8, 1_7_3, 3_5, 1_9_8, 1_7_4, 1]
self.assertEqual(encoded['input_ids'] , lowerCAmelCase_ )
# decoding
__lowerCAmelCase = tokenizer.decode(lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
def lowercase ( self : int ) -> Tuple:
__lowerCAmelCase = self.ta_base_tokenizer
__lowerCAmelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
__lowerCAmelCase = [6_8, 3_5, 1_1_1, 1_1_4, 1_1_3, 1_0_6, 3_5, 1_1_5, 1_0_0, 1_1_7, 1_0_0, 1_0_6, 1_1_7, 1_0_0, 1_1_5, 1_0_7, 3_5, 1_0_5, 1_1_4, 1_1_7, 3_5, 1_1_8, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_0_8, 1_2_5, 1_0_0, 1_1_9, 1_0_8, 1_1_4, 1_1_3, 4_9, 1, 0]
# fmt: on
__lowerCAmelCase = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
if FRAMEWORK != "jax":
__lowerCAmelCase = list(batch.input_ids.numpy()[0] )
else:
__lowerCAmelCase = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual((2, 3_7) , batch.input_ids.shape )
self.assertEqual((2, 3_7) , batch.attention_mask.shape )
def lowercase ( self : Tuple ) -> Optional[Any]:
__lowerCAmelCase = self.ta_base_tokenizer
__lowerCAmelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__lowerCAmelCase = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , lowerCAmelCase_ )
self.assertIn('attention_mask' , lowerCAmelCase_ )
self.assertNotIn('decoder_input_ids' , lowerCAmelCase_ )
self.assertNotIn('decoder_attention_mask' , lowerCAmelCase_ )
def lowercase ( self : Optional[Any] ) -> Optional[int]:
__lowerCAmelCase = self.ta_base_tokenizer
__lowerCAmelCase = [
'Summary of the text.',
'Another summary.',
]
__lowerCAmelCase = tokenizer(
text_target=lowerCAmelCase_ , max_length=3_2 , padding='max_length' , truncation=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ )
self.assertEqual(3_2 , targets['input_ids'].shape[1] )
def lowercase ( self : Dict ) -> Union[str, Any]:
__lowerCAmelCase = self.ta_base_tokenizer
__lowerCAmelCase = ['A long paragraph for summarization. </s>']
__lowerCAmelCase = ['Summary of the text. </s>']
# fmt: off
__lowerCAmelCase = [6_8, 3_5, 1_1_1, 1_1_4, 1_1_3, 1_0_6, 3_5, 1_1_5, 1_0_0, 1_1_7, 1_0_0, 1_0_6, 1_1_7, 1_0_0, 1_1_5, 1_0_7, 3_5, 1_0_5, 1_1_4, 1_1_7, 3_5, 1_1_8, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_0_8, 1_2_5, 1_0_0, 1_1_9, 1_0_8, 1_1_4, 1_1_3, 4_9, 3_5, 1]
__lowerCAmelCase = [8_6, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_2_4, 3_5, 1_1_4, 1_0_5, 3_5, 1_1_9, 1_0_7, 1_0_4, 3_5, 1_1_9, 1_0_4, 1_2_3, 1_1_9, 4_9, 3_5, 1]
# fmt: on
__lowerCAmelCase = tokenizer(lowerCAmelCase_ , text_target=lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , batch['input_ids'][0] )
self.assertEqual(lowerCAmelCase_ , batch['labels'][0] )
def lowercase ( self : Any ) -> Union[str, Any]:
# safety check on max_len default value so we are sure the test works
__lowerCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 4_2 )
# Now let's start the test
__lowerCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__lowerCAmelCase = tempfile.mkdtemp()
__lowerCAmelCase = ' He is very happy, UNwant\u00E9d,running'
__lowerCAmelCase = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
tokenizer.save_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.__class__.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = after_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
shutil.rmtree(lowerCAmelCase_ )
__lowerCAmelCase = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__lowerCAmelCase = tempfile.mkdtemp()
__lowerCAmelCase = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
__lowerCAmelCase = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
__lowerCAmelCase = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
tokenizer.save_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.__class__.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = after_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 4_2 )
__lowerCAmelCase = tokenizer.__class__.from_pretrained(lowerCAmelCase_ , model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length , 4_3 )
shutil.rmtree(lowerCAmelCase_ )
def lowercase ( self : Optional[int] ) -> Union[str, Any]:
__lowerCAmelCase = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCAmelCase_ )
with open(os.path.join(lowerCAmelCase_ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
__lowerCAmelCase = json.load(lowerCAmelCase_ )
with open(os.path.join(lowerCAmelCase_ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
__lowerCAmelCase = json.load(lowerCAmelCase_ )
__lowerCAmelCase = [f"""<extra_id_{i}>""" for i in range(1_2_5 )]
__lowerCAmelCase = added_tokens_extra_ids + [
'an_additional_special_token'
]
__lowerCAmelCase = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(lowerCAmelCase_ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
with open(os.path.join(lowerCAmelCase_ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__lowerCAmelCase = tokenizer_class.from_pretrained(
lowerCAmelCase_ , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__lowerCAmelCase = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=lowerCAmelCase_ )]
__lowerCAmelCase = tokenizer_class.from_pretrained(
lowerCAmelCase_ , additional_special_tokens=lowerCAmelCase_ , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def lowercase ( self : List[str] ) -> Optional[int]:
__lowerCAmelCase = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer_class.from_pretrained(lowerCAmelCase_ )
self.assertTrue(tokenizer.decode([2_5_5] ) == '' )
def lowercase ( self : str ) -> List[Any]:
pass
def lowercase ( self : Tuple ) -> Dict:
pass
def lowercase ( self : int ) -> Any:
pass
def lowercase ( self : Any ) -> Tuple:
pass
def lowercase ( self : List[str] ) -> int:
# The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
# and special added tokens as tokens
__lowerCAmelCase = self.get_tokenizers(fast=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
__lowerCAmelCase = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
__lowerCAmelCase = tokenizer.convert_tokens_to_string(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase ( self : str ) -> List[str]:
__lowerCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
__lowerCAmelCase = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
__lowerCAmelCase = 0
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(
lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
for attr in attributes_list:
setattr(lowerCAmelCase_ , attr + '_id' , lowerCAmelCase_ )
self.assertEqual(getattr(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(getattr(lowerCAmelCase_ , attr + '_id' ) , lowerCAmelCase_ )
setattr(lowerCAmelCase_ , attr + '_id' , lowerCAmelCase_ )
self.assertEqual(getattr(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(getattr(lowerCAmelCase_ , attr + '_id' ) , lowerCAmelCase_ )
setattr(lowerCAmelCase_ , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(lowerCAmelCase_ , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(lowerCAmelCase_ , 'additional_special_tokens_ids' ) , [] )
setattr(lowerCAmelCase_ , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(lowerCAmelCase_ , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(lowerCAmelCase_ , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
| 284
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
_snake_case : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 284
| 1
|
'''simple docstring'''
def solution() -> int:
    """Count the Sundays that fell on the first of the month during the
    twentieth century (1 Jan 1901 to 31 Dec 2000)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays
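

def _datetime_cross_check() -> int:
    # Hypothetical helper (not part of the original solution): recompute the
    # same count with the standard library as an independent sanity check.
    import datetime

    return sum(
        datetime.date(year, month, 1).weekday() == 6  # Sunday
        for year in range(1901, 2001)
        for month in range(1, 13)
    )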
if __name__ == "__main__":
print(solution())
| 214
|
'''simple docstring'''
def print_max_activities(start, finish):
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
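    # With the sample start/finish arrays above, the greedy pass prints:
    #   The following activities are selected:
    #   0,1,3,4,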
| 214
| 1
|
'''simple docstring'''
from __future__ import annotations
def CeilIndex(v, left, right, key) -> int:
    while right - left > 1:
        middle = (left + right) // 2
        if v[middle] >= key:
            right = middle
        else:
            left = middle
    return right


def LongestIncreasingSubsequenceLength(v) -> int:
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling of itself in tail
            tail[CeilIndex(tail, -1, length - 1, v[i])] = v[i]
    return length
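

# Worked example: for v = [2, 5, 3, 7, 11, 8, 10, 13, 6], tail[k] always holds
# the smallest possible tail of an increasing subsequence of length k + 1, and
# LongestIncreasingSubsequenceLength(v) returns 6 (one witness: 2, 3, 7, 8, 10, 13).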
if __name__ == "__main__":
import doctest
doctest.testmod()
| 250
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, do_convert_rgb=True, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = ChannelDimension.FIRST , **_UpperCamelCase , ):
"""simple docstring"""
_lowercase : Tuple = do_resize if do_resize is not None else self.do_resize
_lowercase : Union[str, Any] = size if size is not None else self.size
_lowercase : Optional[int] = get_size_dict(_UpperCamelCase , param_name="size" , default_to_square=_UpperCamelCase )
_lowercase : List[Any] = resample if resample is not None else self.resample
_lowercase : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowercase : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
_lowercase : Tuple = get_size_dict(_UpperCamelCase , param_name="crop_size" , default_to_square=_UpperCamelCase )
_lowercase : Any = do_rescale if do_rescale is not None else self.do_rescale
_lowercase : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowercase : List[str] = do_normalize if do_normalize is not None else self.do_normalize
_lowercase : Optional[int] = image_mean if image_mean is not None else self.image_mean
_lowercase : Dict = image_std if image_std is not None else self.image_std
_lowercase : Tuple = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_lowercase : str = make_list_of_images(_UpperCamelCase )
if not valid_images(_UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_lowercase : List[Any] = [convert_to_rgb(_UpperCamelCase ) for image in images]
# All transformations expect numpy arrays.
_lowercase : List[Any] = [to_numpy_array(_UpperCamelCase ) for image in images]
if do_resize:
_lowercase : Optional[Any] = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase ) for image in images]
if do_center_crop:
_lowercase : Optional[int] = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase ) for image in images]
if do_rescale:
_lowercase : Any = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase ) for image in images]
if do_normalize:
_lowercase : List[Any] = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase ) for image in images]
_lowercase : List[Any] = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images]
_lowercase : Dict = {"pixel_values": images}
return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase )
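# The preprocess pipeline above applies, in order: optional RGB conversion,
# resize, center crop, rescale, normalize, then channel reordering. A minimal
# runnable sketch of the last three steps on a raw numpy image (resize/crop
# omitted; the function name and default stats are my own, not part of the
# class above):
import numpy as np
def sketch_preprocess(image: np.ndarray, mean: float = 0.5, std: float = 0.5) -> np.ndarray:
    # rescale uint8 pixels into [0, 1], then normalize
    pixels = image.astype(np.float32) / 255.0
    pixels = (pixels - mean) / std
    # HWC -> CHW, the channels-first layout the models expect
    return pixels.transpose(2, 0, 1)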
| 250
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def UpperCAmelCase ( ):
"""simple docstring"""
A__ = {}
A__ = 2
while True:
A__ = factor_map.pop(UpperCamelCase__ , UpperCamelCase__ )
if factor:
A__ = factor + prime
while x in factor_map:
x += factor
A__ = factor
else:
A__ = prime
yield prime
prime += 1
def UpperCAmelCase ( UpperCamelCase__ = 1E10 ):
"""simple docstring"""
A__ = sieve()
A__ = 1
while True:
A__ = next(UpperCamelCase__ )
if (2 * prime * n) > limit:
return n
        # Ignore the next prime as the remainder will be 2.
next(UpperCamelCase__ )
n += 2
if __name__ == "__main__":
print(solution())
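# The generator above is an incremental Sieve of Eratosthenes: each known
# composite maps to one of its prime factors, and that factor slides forward
# to the next unclaimed multiple when the composite is reached. Since the
# mangled names obscure this, a readable sketch (names are my own):
def sieve_sketch():
    factor_map = {}
    candidate = 2
    while True:
        factor = factor_map.pop(candidate, None)
        if factor:  # composite: slide its factor to the next free multiple
            multiple = candidate + factor
            while multiple in factor_map:
                multiple += factor
            factor_map[multiple] = factor
        else:  # prime: the first composite it marks is its square
            factor_map[candidate * candidate] = candidate
            yield candidate
        candidate += 1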
| 154
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase = {
"configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimesformerModel",
"TimesformerForVideoClassification",
"TimesformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 154
| 1
|
def __UpperCamelCase ( _A ):
lowerCAmelCase_ = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
lowerCAmelCase_ = set()
return any(
node not in visited and depth_first_search(_A , _A , _A , _A )
for node in graph )
def __UpperCamelCase ( _A , _A , _A , _A ):
visited.add(_A )
rec_stk.add(_A )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(_A , _A , _A , _A ):
return True
elif node in rec_stk:
return True
    # The node needs to be removed from the recursion stack before the function ends
rec_stk.remove(_A )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
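# A readable sketch of the check above: a directed graph has a cycle exactly
# when DFS meets a node that is still on the current recursion stack (a back
# edge). Names and the example graph are my own.
def has_cycle(graph: dict) -> bool:
    visited, on_stack = set(), set()
    def dfs(node) -> bool:
        visited.add(node)
        on_stack.add(node)
        for neighbor in graph[node]:
            if neighbor not in visited:
                if dfs(neighbor):
                    return True
            elif neighbor in on_stack:  # back edge found
                return True
        on_stack.remove(node)
        return False
    return any(node not in visited and dfs(node) for node in graph)
# e.g. has_cycle({0: [1], 1: [2], 2: [0]}) is True; without the 2 -> 0 edge it is False.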
| 278
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __UpperCamelCase ( _A ):
lowerCAmelCase_ = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
lowerCAmelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
lowerCAmelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
lowerCAmelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
lowerCAmelCase_ = [3, 3, 3, 3]
lowerCAmelCase_ = [5, 5, 5, 5]
elif "fl4" in model_name:
lowerCAmelCase_ = [4, 4, 4, 4]
lowerCAmelCase_ = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
lowerCAmelCase_ = [3, 3, 3, 3]
if "lrf" in model_name:
lowerCAmelCase_ = [3, 3, 3, 3]
else:
lowerCAmelCase_ = [2, 2, 2, 2]
if "tiny" in model_name:
lowerCAmelCase_ = 96
elif "small" in model_name:
lowerCAmelCase_ = 96
elif "base" in model_name:
lowerCAmelCase_ = 128
elif "large" in model_name:
lowerCAmelCase_ = 192
elif "xlarge" in model_name:
lowerCAmelCase_ = 256
elif "huge" in model_name:
lowerCAmelCase_ = 352
# set label information
lowerCAmelCase_ = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
lowerCAmelCase_ = '''imagenet-22k-id2label.json'''
else:
lowerCAmelCase_ = '''imagenet-1k-id2label.json'''
lowerCAmelCase_ = json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) )
lowerCAmelCase_ = {int(_A ): v for k, v in idalabel.items()}
lowerCAmelCase_ = {v: k for k, v in idalabel.items()}
lowerCAmelCase_ = FocalNetConfig(
embed_dim=_A , depths=_A , focal_levels=_A , focal_windows=_A , use_conv_embed=_A , idalabel=_A , labelaid=_A , use_post_layernorm=_A , use_layerscale=_A , )
return config
def __UpperCamelCase ( _A ):
if "patch_embed.proj" in name:
lowerCAmelCase_ = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowerCAmelCase_ = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
lowerCAmelCase_ = '''encoder.''' + name
if "encoder.layers" in name:
lowerCAmelCase_ = name.replace('''encoder.layers''' , '''encoder.stages''' )
if "downsample.proj" in name:
lowerCAmelCase_ = name.replace('''downsample.proj''' , '''downsample.projection''' )
if "blocks" in name:
lowerCAmelCase_ = name.replace('''blocks''' , '''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
lowerCAmelCase_ = name.replace('''modulation.f''' , '''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
lowerCAmelCase_ = name.replace('''modulation.h''' , '''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
lowerCAmelCase_ = name.replace('''modulation.proj''' , '''modulation.projection_out''' )
if name == "norm.weight":
lowerCAmelCase_ = '''layernorm.weight'''
if name == "norm.bias":
lowerCAmelCase_ = '''layernorm.bias'''
if "head" in name:
lowerCAmelCase_ = name.replace('''head''' , '''classifier''' )
else:
lowerCAmelCase_ = '''focalnet.''' + name
return name
def __UpperCamelCase ( _A , _A , _A=False ):
# fmt: off
lowerCAmelCase_ = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
lowerCAmelCase_ = model_name_to_url[model_name]
print('''Checkpoint URL: ''' , _A )
lowerCAmelCase_ = torch.hub.load_state_dict_from_url(_A , map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
lowerCAmelCase_ = state_dict.pop(_A )
lowerCAmelCase_ = val
lowerCAmelCase_ = get_focalnet_config(_A )
lowerCAmelCase_ = FocalNetForImageClassification(_A )
model.eval()
# load state dict
model.load_state_dict(_A )
# verify conversion
lowerCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCAmelCase_ = BitImageProcessor(
do_resize=_A , size={'''shortest_edge''': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=_A , crop_size=224 , do_normalize=_A , image_mean=_A , image_std=_A , )
lowerCAmelCase_ = Image.open(requests.get(_A , stream=_A ).raw )
lowerCAmelCase_ = processor(images=_A , return_tensors='''pt''' )
lowerCAmelCase_ = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
lowerCAmelCase_ = image_transforms(_A ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , _A , atol=1E-4 )
lowerCAmelCase_ = model(**_A )
lowerCAmelCase_ = outputs.logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
print('''First values of logits:''' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowerCAmelCase_ = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
elif model_name == "focalnet-tiny-lrf":
lowerCAmelCase_ = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
elif model_name == "focalnet-small":
lowerCAmelCase_ = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
elif model_name == "focalnet-small-lrf":
lowerCAmelCase_ = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
elif model_name == "focalnet-base":
lowerCAmelCase_ = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
elif model_name == "focalnet-base-lrf":
lowerCAmelCase_ = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
assert torch.allclose(outputs.logits[0, :3] , _A , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_A )
processor.save_pretrained(_A )
if push_to_hub:
print(f"Pushing model and processor of {model_name} to the hub..." )
model.push_to_hub(f"{model_name}" )
processor.push_to_hub(f"{model_name}" )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
_A = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
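# Most of the script above is a mechanical state-dict key translation. The
# core pattern, sketched generically (the rename rule shown is one real pair
# from the renaming function above; the helper name is my own):
def rename_state_dict(state_dict: dict, rules: list) -> dict:
    renamed = {}
    for key, value in state_dict.items():
        for old, new in rules:
            key = key.replace(old, new)
        renamed[key] = value
    return renamed
# e.g. rename_state_dict(sd, [("patch_embed.proj", "embeddings.patch_embeddings.projection")])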
| 278
| 1
|
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: nn.ModuleList , lowerCAmelCase: nn.ModuleList , lowerCAmelCase: List[int] ) -> None:
_UpperCAmelCase : Tuple = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(lowerCAmelCase ) == len(lowerCAmelCase ), F'{len(lowerCAmelCase )} != {len(lowerCAmelCase )}'
dest_layers.load_state_dict(layers_to_copy.state_dict() )
SCREAMING_SNAKE_CASE_ = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
SCREAMING_SNAKE_CASE_ = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: int , lowerCAmelCase: Optional[Any] ) -> Any:
try:
_UpperCAmelCase : Optional[Any] = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F'no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'
F' {n_student}' )
return list(range(lowerCAmelCase ) )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: List[str] , lowerCAmelCase: int ) -> List[int]:
if n_student > n_teacher:
raise ValueError(F'Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}' )
elif n_teacher == n_student:
return list(range(lowerCAmelCase ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Union[str, PreTrainedModel] , lowerCAmelCase: Union[str, Path] = "student" , lowerCAmelCase: Union[int, None] = None , lowerCAmelCase: Union[int, None] = None , lowerCAmelCase: Tuple=False , lowerCAmelCase: List[Any]=None , lowerCAmelCase: Tuple=None , **lowerCAmelCase: Any , ) -> Tuple[PreTrainedModel, List[int], List[int]]:
_UpperCAmelCase : Tuple = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
assert (e is not None) or (d is not None), _msg
if isinstance(lowerCAmelCase , lowerCAmelCase ):
AutoTokenizer.from_pretrained(lowerCAmelCase ).save_pretrained(lowerCAmelCase ) # purely for convenience
_UpperCAmelCase : str = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase ).eval()
else:
assert isinstance(lowerCAmelCase , lowerCAmelCase ), F'teacher must be a model or string got type {type(lowerCAmelCase )}'
_UpperCAmelCase : List[str] = teacher.config.to_diff_dict()
try:
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
_UpperCAmelCase : Optional[Any] = teacher_e
if d is None:
_UpperCAmelCase : str = teacher_d
init_kwargs.update({"encoder_layers": e, "decoder_layers": d} )
except AttributeError: # T5
if hasattr(teacher.config , "num_encoder_layers" ):
_UpperCAmelCase , _UpperCAmelCase : str = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
_UpperCAmelCase , _UpperCAmelCase : List[Any] = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
_UpperCAmelCase : List[Any] = teacher_e
if d is None:
_UpperCAmelCase : Optional[int] = teacher_d
if hasattr(teacher.config , "num_encoder_layers" ):
init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d} )
else:
init_kwargs.update({"num_layers": e, "num_decoder_layers": d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(lowerCAmelCase )
# Copy weights
_UpperCAmelCase : Tuple = teacher.config_class(**lowerCAmelCase )
_UpperCAmelCase : int = AutoModelForSeqaSeqLM.from_config(lowerCAmelCase )
    # Start by copying the full teacher state dict; this will copy the first N teacher layers to the student.
_UpperCAmelCase : int = student.load_state_dict(teacher.state_dict() , strict=lowerCAmelCase )
    assert info.missing_keys == [], info.missing_keys # every student key should have a teacher key.
if copy_first_teacher_layers: # Our copying is done. We just log and save
_UpperCAmelCase , _UpperCAmelCase : Dict = list(range(lowerCAmelCase ) ), list(range(lowerCAmelCase ) )
logger.info(
F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'
F' {save_path}' )
student.save_pretrained(lowerCAmelCase )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
_UpperCAmelCase : List[int] = pick_layers_to_copy(lowerCAmelCase , lowerCAmelCase )
if d_layers_to_copy is None:
_UpperCAmelCase : List[int] = pick_layers_to_copy(lowerCAmelCase , lowerCAmelCase )
try:
if hasattr(
lowerCAmelCase , "prophetnet" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , lowerCAmelCase )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , lowerCAmelCase )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , lowerCAmelCase )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , lowerCAmelCase )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , lowerCAmelCase )
copy_layers(teacher.decoder.block , student.decoder.block , lowerCAmelCase )
logger.info(
F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}' )
_UpperCAmelCase : Dict = {
"teacher_type": teacher.config.model_type,
"copied_encoder_layers": e_layers_to_copy,
"copied_decoder_layers": d_layers_to_copy,
}
student.save_pretrained(lowerCAmelCase )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
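# The LAYERS_TO_COPY tables above spread the copied teacher layers out while
# keeping the first and last layer; the script's actual fallback is simply the
# first n_student layers. As an illustration only, an evenly-spaced variant
# (my own, not part of the script):
def spread_layer_indices(n_teacher: int, n_student: int) -> list:
    if n_student == 1:
        return [0]
    step = (n_teacher - 1) / (n_student - 1)
    return [round(i * step) for i in range(n_student)]
# spread_layer_indices(12, 4) -> [0, 4, 7, 11], close to the hardcoded [0, 4, 8, 11].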
| 189
|
from typing import Any
class a :
def __init__( self , A_ ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = data
_UpperCAmelCase : Any = None
class a :
def __init__( self ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = None
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : str = self.head
while temp is not None:
print(temp.data , end=" " )
_UpperCAmelCase : str = temp.next
print()
def _UpperCAmelCase ( self , A_ ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = Node(A_ )
_UpperCAmelCase : Tuple = self.head
_UpperCAmelCase : Tuple = new_node
def _UpperCAmelCase ( self , A_ , A_ ):
'''simple docstring'''
if node_data_a == node_data_a:
return
else:
_UpperCAmelCase : int = self.head
while node_a is not None and node_a.data != node_data_a:
_UpperCAmelCase : Tuple = node_a.next
_UpperCAmelCase : Dict = self.head
while node_a is not None and node_a.data != node_data_a:
_UpperCAmelCase : List[Any] = node_a.next
if node_a is None or node_a is None:
return
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = node_a.data, node_a.data
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('After swapping')
ll.print_list()
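# The obfuscation above collapses both classes into the name `a`; the intended
# structure is a singly linked list whose swap only exchanges the .data
# payloads and never relinks nodes. A readable sketch (names are my own):
class SketchNode:
    def __init__(self, data):
        self.data = data
        self.next = None
class SketchLinkedList:
    def __init__(self):
        self.head = None
    def push(self, data):
        node = SketchNode(data)
        node.next = self.head
        self.head = node
    def swap_data(self, a, b):
        node_a = node_b = self.head
        while node_a is not None and node_a.data != a:
            node_a = node_a.next
        while node_b is not None and node_b.data != b:
            node_b = node_b.next
        if node_a is not None and node_b is not None:
            node_a.data, node_b.data = node_b.data, node_a.data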
| 189
| 1
|
"""simple docstring"""
from math import factorial
def a__ ( snake_case__ = 20 ) -> int:
lowerCamelCase = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
lowerCamelCase = n // 2
return int(factorial(lowercase_ ) / (factorial(lowercase_ ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
lowerCAmelCase : List[str] = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number.""")
| 291
|
def UpperCamelCase (lowercase_: int = 10 ) -> str:
if not isinstance(lowercase_ , lowercase_ ) or n < 0:
raise ValueError("""Invalid input""" )
A__ : List[str] = 10**n
A__ : Any = 28433 * (pow(2 , 7830457 , lowercase_ )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(10) = }''')
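# The name mangling above loses the variables, so for clarity: the intent is
# the last n digits of the non-Mersenne prime 28433 * 2**7830457 + 1, and the
# key step is three-argument pow() so the huge power is never materialised.
# A direct sketch under that reading:
def last_digits(n: int = 10) -> str:
    modulus = 10 ** n
    number = 28433 * pow(2, 7830457, modulus) + 1
    return str(number % modulus)
# last_digits() -> '8739992577'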
| 192
| 0
|
'''simple docstring'''
def __lowerCamelCase ( lowerCAmelCase_ ) -> Any:
if len(lowerCAmelCase_ ) <= 1:
return [tuple(lowerCAmelCase_ )]
_a : Any = []
def generate(lowerCAmelCase_ , lowerCAmelCase_ ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , lowerCAmelCase_ )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
_a , _a : Union[str, Any] = arr[k - 1], arr[i]
else: # k is odd
_a , _a : str = arr[k - 1], arr[0]
generate(k - 1 , lowerCAmelCase_ )
generate(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
return res
if __name__ == "__main__":
__lowerCAmelCase = input('''Enter numbers separated by a comma:\n''').strip()
__lowerCAmelCase = [int(item) for item in user_input.split(''',''')]
print(heaps(arr))
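# generate() above is Heap's algorithm: consecutive permutations differ by a
# single swap, with the swap partner chosen by the parity of k. When the
# single-swap ordering does not matter, the standard library yields the same
# set of permutations:
from itertools import permutations
def all_permutations(arr: list) -> list:
    return list(permutations(arr))  # same set, different order than Heap's
# e.g. len(all_permutations([1, 2, 3])) == 6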
| 355
|
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
__lowerCAmelCase = 3
def __lowerCamelCase ( lowerCAmelCase_ ) -> int:
print('Generating primitive root of p' )
while True:
_a : List[Any] = random.randrange(3 , lowerCAmelCase_ )
if pow(lowerCAmelCase_ , 2 , lowerCAmelCase_ ) == 1:
continue
if pow(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) == 1:
continue
return g
def __lowerCamelCase ( lowerCAmelCase_ ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
print('Generating prime p...' )
_a : int = rabin_miller.generate_large_prime(lowerCAmelCase_ ) # select large prime number.
_a : List[str] = primitive_root(lowerCAmelCase_ ) # one primitive root on modulo p.
_a : Any = random.randrange(3 , lowerCAmelCase_ ) # private_key -> have to be greater than 2 for safety.
_a : List[Any] = cryptomath.find_mod_inverse(pow(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ )
_a : Tuple = (key_size, e_a, e_a, p)
_a : str = (key_size, d)
return public_key, private_key
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> None:
if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ):
print('\nWARNING:' )
print(
f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
'Use a different name or delete these files and re-run this program.' )
sys.exit()
_a , _a : Dict = generate_key(lowerCAmelCase_ )
print(f"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(f"""{name}_pubkey.txt""" , 'w' ) as fo:
fo.write(f"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
print(f"""Writing private key to file {name}_privkey.txt...""" )
with open(f"""{name}_privkey.txt""" , 'w' ) as fo:
fo.write(f"""{private_key[0]},{private_key[1]}""" )
def __lowerCamelCase ( ) -> None:
print('Making key files...' )
make_key_files('elgamal' , 2048 )
print('Key files generation successful' )
if __name__ == "__main__":
main()
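# A toy walk-through of textbook ElGamal on deliberately tiny numbers (the
# script above uses 2048-bit primes and stores a modular inverse in the public
# key; the numbers and the plain g**d form below are my own illustration,
# never usable at these sizes):
p, g, d = 467, 2, 127                         # prime modulus, generator, private key
beta = pow(g, d, p)                           # public component g**d mod p
k, m = 213, 100                               # ephemeral key and message
c1, c2 = pow(g, k, p), (m * pow(beta, k, p)) % p
recovered = (c2 * pow(c1, p - 1 - d, p)) % p  # c1**(-d) via Fermat's little theorem
assert recovered == m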
| 107
| 0
|
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A__ : Dict =get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( snake_case_ , unittest.TestCase ):
_lowercase: Optional[Any] = XLMRobertaTokenizer
_lowercase: Any = XLMRobertaTokenizerFast
_lowercase: int = True
_lowercase: Union[str, Any] = True
def lowercase__ ( self : List[Any] ) -> Tuple:
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase = XLMRobertaTokenizer(__snake_case , keep_accents=__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self : Optional[Any] ) -> List[str]:
_lowerCAmelCase = """<pad>"""
_lowerCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) , __snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) , __snake_case )
def lowercase__ ( self : Dict ) -> Union[str, Any]:
_lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(__snake_case ) , 10_02 )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 10_02 )
def lowercase__ ( self : Dict ) -> Dict:
_lowerCAmelCase = XLMRobertaTokenizer(__snake_case , keep_accents=__snake_case )
_lowerCAmelCase = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__snake_case , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
_lowerCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
_lowerCAmelCase = tokenizer.convert_tokens_to_ids(__snake_case )
self.assertListEqual(
__snake_case , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
_lowerCAmelCase = tokenizer.convert_ids_to_tokens(__snake_case )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def lowercase__ ( self : List[str] ) -> Tuple:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_lowerCAmelCase = (self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
_lowerCAmelCase = self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
_lowerCAmelCase = tempfile.mkdtemp()
_lowerCAmelCase = tokenizer_r.save_pretrained(__snake_case )
_lowerCAmelCase = tokenizer_p.save_pretrained(__snake_case )
                # Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
_lowerCAmelCase = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(__snake_case , __snake_case )
# Checks everything loads correctly in the same way
_lowerCAmelCase = tokenizer_r.from_pretrained(__snake_case )
_lowerCAmelCase = tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=True
_lowerCAmelCase = tempfile.mkdtemp()
_lowerCAmelCase = tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
_lowerCAmelCase = tokenizer_p.save_pretrained(__snake_case )
                # Checks it saves with the same files
self.assertSequenceEqual(__snake_case , __snake_case )
# Checks everything loads correctly in the same way
_lowerCAmelCase = tokenizer_r.from_pretrained(__snake_case )
_lowerCAmelCase = tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=False
_lowerCAmelCase = tempfile.mkdtemp()
_lowerCAmelCase = tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
_lowerCAmelCase = tokenizer_p.save_pretrained(__snake_case )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_lowerCAmelCase = tokenizer_r.from_pretrained(__snake_case )
_lowerCAmelCase = tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
@cached_property
def lowercase__ ( self : Dict ) -> List[Any]:
return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""" )
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(__snake_case , f.name )
_lowerCAmelCase = XLMRobertaTokenizer(f.name , keep_accents=__snake_case )
_lowerCAmelCase = pickle.dumps(__snake_case )
pickle.loads(__snake_case )
def lowercase__ ( self : Optional[int] ) -> List[str]:
if not self.test_rust_tokenizer:
return
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = self.get_rust_tokenizer()
_lowerCAmelCase = """I was born in 92000, and this is falsé."""
_lowerCAmelCase = tokenizer.tokenize(__snake_case )
_lowerCAmelCase = rust_tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
_lowerCAmelCase = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
_lowerCAmelCase = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
_lowerCAmelCase = self.get_rust_tokenizer()
_lowerCAmelCase = tokenizer.encode(__snake_case )
_lowerCAmelCase = rust_tokenizer.encode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
@slow
def lowercase__ ( self : Dict ) -> List[str]:
_lowerCAmelCase = """Hello World!"""
_lowerCAmelCase = [0, 3_53_78, 66_61, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__snake_case , self.big_tokenizer.encode(__snake_case ) )
@slow
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
_lowerCAmelCase = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
_lowerCAmelCase = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__snake_case , self.big_tokenizer.encode(__snake_case ) )
@slow
def lowercase__ ( self : str ) -> Any:
# fmt: off
_lowerCAmelCase = {"""input_ids""": [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
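# The "+ tokenizer.fairseq_offset" arithmetic in the assertions above exists
# because XLM-R reserves ids 0-3 for "<s>", "<pad>", "</s>", "<unk>" ahead of
# the SentencePiece vocabulary, so raw SentencePiece ids are shifted by an
# offset of 1 and the unknown piece maps to id 3. A sketch of that mapping
# (function name is my own; offset 1 matches XLM-R):
def sp_id_to_fairseq_id(sp_id: int, sp_unk_id: int = 0, offset: int = 1) -> int:
    if sp_id == sp_unk_id:
        return 3  # fairseq "<unk>"
    return sp_id + offset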
| 70
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
A__ : List[str] =logging.get_logger(__name__)
A__ : Any ={'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
A__ : Any ={
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
A__ : Optional[int] ={
'''junnyu/roformer_chinese_small''': 15_36,
'''junnyu/roformer_chinese_base''': 15_36,
'''junnyu/roformer_chinese_char_small''': 5_12,
'''junnyu/roformer_chinese_char_base''': 5_12,
'''junnyu/roformer_small_discriminator''': 1_28,
'''junnyu/roformer_small_generator''': 1_28,
}
A__ : Optional[int] ={
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class UpperCAmelCase ( snake_case_ ):
_lowercase: Optional[Any] = VOCAB_FILES_NAMES
_lowercase: Tuple = PRETRAINED_VOCAB_FILES_MAP
_lowercase: Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase: str = PRETRAINED_INIT_CONFIGURATION
_lowercase: List[Any] = RoFormerTokenizer
def __init__( self : Dict , __snake_case : str=None , __snake_case : Tuple=None , __snake_case : List[Any]=True , __snake_case : str="[UNK]" , __snake_case : Tuple="[SEP]" , __snake_case : str="[PAD]" , __snake_case : str="[CLS]" , __snake_case : Any="[MASK]" , __snake_case : Dict=True , __snake_case : str=None , **__snake_case : Optional[Any] , ) -> Union[str, Any]:
super().__init__(
__snake_case , tokenizer_file=__snake_case , do_lower_case=__snake_case , unk_token=__snake_case , sep_token=__snake_case , pad_token=__snake_case , cls_token=__snake_case , mask_token=__snake_case , tokenize_chinese_chars=__snake_case , strip_accents=__snake_case , **__snake_case , )
_lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get("""lowercase""" , __snake_case ) != do_lower_case
or pre_tok_state.get("""strip_accents""" , __snake_case ) != strip_accents
):
_lowerCAmelCase = getattr(__snake_case , pre_tok_state.pop("""type""" ) )
_lowerCAmelCase = do_lower_case
_lowerCAmelCase = strip_accents
_lowerCAmelCase = pre_tok_class(**__snake_case )
_lowerCAmelCase = do_lower_case
def __getstate__( self : int ) -> Optional[int]:
_lowerCAmelCase = self.__dict__.copy()
_lowerCAmelCase = BertPreTokenizer()
return state
def __setstate__( self : Tuple , __snake_case : Tuple ) -> List[str]:
_lowerCAmelCase = d
_lowerCAmelCase = self.__dict__["""_tokenizer"""].get_vocab()
_lowerCAmelCase = PreTokenizer.custom(JiebaPreTokenizer(__snake_case ) )
def lowercase__ ( self : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Optional[int]=None ) -> Optional[Any]:
_lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase__ ( self : List[str] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ) -> List[int]:
_lowerCAmelCase = [self.sep_token_id]
_lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase__ ( self : int , __snake_case : str , __snake_case : Optional[str] = None ) -> Tuple[str]:
_lowerCAmelCase = self._tokenizer.model.save(__snake_case , name=__snake_case )
return tuple(__snake_case )
def lowercase__ ( self : Dict , __snake_case : Dict , __snake_case : int=None , __snake_case : List[Any]=None , __snake_case : List[Any]=False , **__snake_case : Dict , ) -> str:
_lowerCAmelCase = BertPreTokenizer()
return super().save_pretrained(__snake_case , __snake_case , __snake_case , __snake_case , **__snake_case )
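# Design note on the __getstate__/__setstate__ pair above: PreTokenizer.custom
# wraps a live Python object (the Jieba pre-tokenizer) that the Rust backend
# cannot serialise, so the tokenizer swaps in a plain BertPreTokenizer before
# pickling and rebuilds the custom one on unpickling. The generic pattern,
# sketched with illustrative names:
import pickle
class HoldsUnpicklableHandle:
    def __init__(self):
        self.handle = object()  # stand-in for a non-serialisable resource
    def __getstate__(self):
        state = self.__dict__.copy()
        state["handle"] = None  # drop the problematic piece
        return state
    def __setstate__(self, state):
        self.__dict__.update(state)
        self.handle = object()  # rebuild after unpickling
restored = pickle.loads(pickle.dumps(HoldsUnpicklableHandle()))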
| 70
| 1
|
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
    # The slow tests are often failing with OOM errors on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed,
    # but it will be slower, as stated here: https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowercase_ = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class __lowerCAmelCase :
'''simple docstring'''
__UpperCAmelCase : List[str] = PegasusConfig
__UpperCAmelCase : Dict = {}
__UpperCAmelCase : str = 'gelu'
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=False , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a=0.1 , _a=0.1 , _a=20 , _a=2 , _a=1 , _a=0 , ):
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = eos_token_id
__a = pad_token_id
__a = bos_token_id
def __UpperCAmelCase ( self ):
__a = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
__a = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
__a = np.concatenate([input_ids, eos_tensor] , axis=1 )
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__a = prepare_pegasus_inputs_dict(_a , _a , _a )
return config, inputs_dict
def __UpperCAmelCase ( self , _a , _a , _a ):
__a = 20
__a = model_class_name(_a )
__a = model.encode(inputs_dict['''input_ids'''] )
__a , __a = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
__a = model.init_cache(decoder_input_ids.shape[0] , _a , _a )
__a = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
__a = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__a = model.decode(
decoder_input_ids[:, :-1] , _a , decoder_attention_mask=_a , past_key_values=_a , decoder_position_ids=_a , )
__a = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
__a = model.decode(
decoder_input_ids[:, -1:] , _a , decoder_attention_mask=_a , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_a , )
__a = model.decode(_a , _a )
__a = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' )
def __UpperCAmelCase ( self , _a , _a , _a ):
__a = 20
__a = model_class_name(_a )
__a = model.encode(inputs_dict['''input_ids'''] )
__a , __a = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
__a = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__a = model.init_cache(decoder_input_ids.shape[0] , _a , _a )
__a = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__a = model.decode(
decoder_input_ids[:, :-1] , _a , decoder_attention_mask=_a , past_key_values=_a , decoder_position_ids=_a , )
__a = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
__a = model.decode(
decoder_input_ids[:, -1:] , _a , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_a , decoder_position_ids=_a , )
__a = model.decode(_a , _a , decoder_attention_mask=_a )
__a = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' )
def lowercase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any]=None , lowerCAmelCase__ : Optional[int]=None , ) -> str:
if attention_mask is None:
__a = np.not_equal(lowerCAmelCase__ , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
__a = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
__UpperCAmelCase : Optional[Any] = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : List[str] = False
def __UpperCAmelCase ( self ):
__a = FlaxPegasusModelTester(self )
__a = ConfigTester(self , config_class=_a )
def __UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_a , _a , _a )
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_a , _a , _a )
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__a = self._prepare_for_class(_a , _a )
__a = model_class(_a )
@jax.jit
def encode_jitted(_a , _a=None , **_a ):
return model.encode(input_ids=_a , attention_mask=_a )
with self.subTest('''JIT Enabled''' ):
__a = encode_jitted(**_a ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__a = encode_jitted(**_a ).to_tuple()
self.assertEqual(len(_a ) , len(_a ) )
for jitted_output, output in zip(_a , _a ):
self.assertEqual(jitted_output.shape , output.shape )
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__a = model_class(_a )
__a = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
__a = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(_a , _a , _a ):
return model.decode(
decoder_input_ids=_a , decoder_attention_mask=_a , encoder_outputs=_a , )
with self.subTest('''JIT Enabled''' ):
__a = decode_jitted(**_a ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__a = decode_jitted(**_a ).to_tuple()
self.assertEqual(len(_a ) , len(_a ) )
for jitted_output, output in zip(_a , _a ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __UpperCAmelCase ( self ):
for model_class_name in self.all_model_classes:
__a = model_class_name.from_pretrained('''google/pegasus-large''' , from_pt=_a )
__a = np.ones((1, 1) )
__a = model(_a )
self.assertIsNotNone(_a )
@slow
def __UpperCAmelCase ( self ):
__a = FlaxPegasusForConditionalGeneration.from_pretrained('''google/pegasus-xsum''' )
__a = PegasusTokenizer.from_pretrained('''google/pegasus-xsum''' )
__a = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
__a = [
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
__a = tokenizer(_a , return_tensors='''np''' , truncation=_a , max_length=512 , padding=_a )
__a = model.generate(**_a , num_beams=2 ).sequences
__a = tokenizer.batch_decode(_a , skip_special_tokens=_a )
assert tgt_text == decoded
| 11
|
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def lowercase ( lowerCAmelCase__ : dict ) -> tuple:
return (data["data"], data["target"])
def lowercase ( lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : np.ndarray ) -> np.ndarray:
__a = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(lowerCAmelCase__ , lowerCAmelCase__ )
# Predict target for test data
__a = xgb.predict(lowerCAmelCase__ )
__a = predictions.reshape(len(lowerCAmelCase__ ) , 1 )
return predictions
def lowercase ( ) -> None:
__a = fetch_california_housing()
__a , __a = data_handling(lowerCAmelCase__ )
__a , __a , __a , __a = train_test_split(
lowerCAmelCase__ , lowerCAmelCase__ , test_size=0.25 , random_state=1 )
__a = xgboost(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
    # Print the error metrics
print(f'''Mean Absolute Error : {mean_absolute_error(lowerCAmelCase__ , lowerCAmelCase__ )}''' )
print(f'''Mean Square Error : {mean_squared_error(lowerCAmelCase__ , lowerCAmelCase__ )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
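# One possible refinement of the metrics printed above: RMSE is in the same
# units as the target, which often reads better than raw MSE for house
# prices. A small helper, assuming the y_test/predictions pair from main():
import numpy as np
from sklearn.metrics import mean_squared_error
def rmse(y_true, y_pred) -> float:
    return float(np.sqrt(mean_squared_error(y_true, y_pred)))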
| 11
| 1
|
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class a__ :
"""simple docstring"""
__lowerCamelCase = BlenderbotConfig
__lowerCamelCase = {}
__lowerCamelCase = 'gelu'
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=False , lowercase=99 , lowercase=32 , lowercase=2 , lowercase=4 , lowercase=37 , lowercase=0.1 , lowercase=0.1 , lowercase=20 , lowercase=2 , lowercase=1 , lowercase=0 , ) -> Any:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = eos_token_id
A__ = pad_token_id
A__ = bos_token_id
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
A__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
A__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
A__ = tf.concat([input_ids, eos_tensor] , axis=1 )
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
A__ = prepare_blenderbot_inputs_dict(lowercase , lowercase , lowercase )
return config, inputs_dict
def UpperCamelCase ( self , lowercase , lowercase ) -> Tuple:
'''simple docstring'''
A__ = TFBlenderbotModel(config=lowercase ).get_decoder()
A__ = inputs_dict["input_ids"]
A__ = input_ids[:1, :]
A__ = inputs_dict["attention_mask"][:1, :]
A__ = inputs_dict["head_mask"]
A__ = 1
# first forward pass
A__ = model(lowercase , attention_mask=lowercase , head_mask=lowercase , use_cache=lowercase )
A__ , A__ = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
A__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention_mask
A__ = tf.concat([input_ids, next_tokens] , axis=-1 )
A__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
A__ = model(lowercase , attention_mask=lowercase )[0]
A__ = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
A__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
A__ = output_from_no_past[:, -3:, random_slice_idx]
A__ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase , lowercase , rtol=1e-3 )
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 68
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"
class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
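# A minimal usage sketch (not part of the original module): loading one of the
# checkpoints listed in PRETRAINED_VOCAB_FILES_MAP above. This assumes network
# access to the Hugging Face Hub; the sample sentence is an arbitrary example.
#
#     tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
#     ids = tokenizer("Paris est la capitale de la France.")["input_ids"]
#     print(tokenizer.convert_ids_to_tokens(ids))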
| 234
| 0
|
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of an edge label to the child node
        self.nodes: dict[str, RadixNode] = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf

        self.prefix = prefix
    def match(self, word: str):
        """Compute the common substring of the node's prefix and a word."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
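    # For example, RadixNode(prefix="myprefix").match("mystring") returns
    # ("my", "prefix", "string"): the common part, the leftover of the node's
    # prefix, and the leftover of the queried word.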
    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)
    def insert(self, word: str) -> None:
        # Case 1: The word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)
    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)
    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True
    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")

        for value in self.nodes.values():
            value.print_tree(height + 1)
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()
if __name__ == "__main__":
main()
| 352
|
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
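# Example invocation (hypothetical argument values; the accepted flags follow
# from the `args.*` attributes the script reads above):
#
#     python pretokenizing.py \
#         --tokenizer_dir codeparrot/codeparrot \
#         --dataset_name codeparrot/codeparrot-clean-train \
#         --tokenized_data_repo my-user/codeparrot-train-tokenized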
| 261
| 0
|
"""simple docstring"""
def longest_common_subsequence(x: str, y: str):
    """
    Finds the longest common subsequence between two strings.
    Also returns the subsequence found.
    """
    assert x is not None
    assert y is not None
    m = len(x)
    n = len(y)
    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1
    return l[m][n], seq
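# For example, longest_common_subsequence("AGGTAB", "GXTXAYB") returns
# (4, "GTAB"): "GTAB" occurs (non-contiguously) in both strings and no common
# subsequence is longer, which is exactly what the demo below exercises.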
if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"
    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
    import doctest

    doctest.testmod()
| 291
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50_000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 291
| 1
|
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
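# A typical launch (hypothetical file name; any of the distributed setups listed
# in the header comment work the same way):
#
#     accelerate launch memory.py --mixed_precision fp16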
| 37
|
'''simple docstring'''
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves the interleaved print problem in multi-process environments"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")
dist.barrier()
if rank == 0:
printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")
except Exception:
printflock(f"{gpu} is broken")
raise
| 37
| 1
|
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)
def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False
def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    # Initialize path with -1, indicating vertices that have not been visited yet
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
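# A small usage sketch (the graph below is an assumed example, not from the
# original module): vertices 0-4 with an adjacency matrix that contains the
# Hamiltonian cycle 0 -> 1 -> 2 -> 4 -> 3 -> 0.
if __name__ == "__main__":
    graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    print(hamilton_cycle(graph))  # expected: [0, 1, 2, 4, 3, 0]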
| 50
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting.
        - In legacy mode: no prefix and suffix = [eos, src_lang_code].
        - In default mode: prefix = [src_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting.
        - In legacy mode: no prefix and suffix = [eos, tgt_lang_code].
        - In default mode: prefix = [tgt_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
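# A minimal translation-oriented usage sketch (assumes Hub access; the sentence
# is an arbitrary example):
#
#     tok = NllbTokenizerFast.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#     )
#     enc = tok("Hello, world!", return_tensors="pt")
#     # In default (non-legacy) mode, enc.input_ids starts with the eng_Latn
#     # language-code token and ends with </s>.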
| 50
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
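# For example, rename_state_dict_key("encoder.layers.0.attention.q_lin.weight")
# returns "encoder.layers.0.self_attn.q_proj.weight": the PATTERNS table maps
# "attention" -> "attn" and "q_lin" -> "q_proj", and the encoder branch then
# rewrites ".attn" to ".self_attn".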
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
_SCREAMING_SNAKE_CASE = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 3
|
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()

            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(tmp_dir)
                new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_image_processor, CustomImageProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 3
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))
if __name__ == "__main__":
import doctest
doctest.testmod()
a :int = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 132
|
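A quick usage sketch for the row above (illustrative, not part of the dataset row): merging the two sample tuples should interleave them into a single ascending list.

# Assumes the SortedLinkedList / merge_lists definitions from the snippet above.
odds = SortedLinkedList(test_data_odd)    # -11 -> -1 -> 0 -> 1 -> 3 -> 5 -> 7 -> 9
evens = SortedLinkedList(test_data_even)  # -2 -> 0 -> 2 -> 3 -> 4 -> 6 -> 8 -> 10
merged = merge_lists(odds, evens)
assert len(merged) == len(odds) + len(evens)
print(merged)  # -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10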
"""simple docstring"""
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to BridgeTowerImageProcessor,
        assuming do_resize is set to True with a scalar size and size_divisor.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1_333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
| 132
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
'''REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RemBertForCausalLM''',
'''RemBertForMaskedLM''',
'''RemBertForMultipleChoice''',
'''RemBertForQuestionAnswering''',
'''RemBertForSequenceClassification''',
'''RemBertForTokenClassification''',
'''RemBertLayer''',
'''RemBertModel''',
'''RemBertPreTrainedModel''',
'''load_tf_weights_in_rembert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
'''TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRemBertForCausalLM''',
'''TFRemBertForMaskedLM''',
'''TFRemBertForMultipleChoice''',
'''TFRemBertForQuestionAnswering''',
'''TFRemBertForSequenceClassification''',
'''TFRemBertForTokenClassification''',
'''TFRemBertLayer''',
'''TFRemBertModel''',
'''TFRemBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 335
|
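For context, a short sketch of what the `_LazyModule` pattern above buys (hypothetical usage; assumes a transformers install): submodules are only imported when one of their attributes is first touched.

import importlib

rembert = importlib.import_module("transformers.models.rembert")
print(rembert.RemBertConfig())        # configuration_rembert loads on this access
print(rembert.RemBertModel.__name__)  # modeling_rembert (the heavy part) loads only here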
# flake8: noqa
# Lint as: python3
__all__ = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 335
| 1
|
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 81
|
from math import isclose, sqrt


def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    # normal_gradient = gradient of the line normal to the ellipse at the point of reflection
    normal_gradient = point_y / 4 / point_x
    # angle of incidence equals angle of reflection; use double-angle identities
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (1 + normal_gradient * normal_gradient)
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
| 307
| 0
|
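A minimal sanity check for the geometry above (a sketch, assuming the reconstructed next_point): every reflection point must stay on the ellipse 4x^2 + y^2 = 100.

from math import isclose

x, y, m = 1.4, -9.6, (10.1 - -9.6) / (0.0 - 1.4)
for _ in range(5):
    x, y, m = next_point(x, y, m)
    assert isclose(4 * x * x + y * y, 100.0, rel_tol=1e-6)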
import flax.linen as nn
import jax
import jax.numpy as jnp


class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states, shape=(batch, height * 2, width * 2, channels), method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels, kernel_size=(1, 1), strides=(1, 1), padding="VALID", dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
| 361
|
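A hedged usage sketch for the blocks above (assumes the reconstructed class names and a working jax/flax install); inputs are channels-last, as the resize call implies.

import jax
import jax.numpy as jnp

block = FlaxResnetBlock2D(in_channels=32, out_channels=64, dropout_prob=0.0)
hidden = jnp.ones((1, 8, 8, 32))  # (batch, height, width, channels)
temb = jnp.ones((1, 128))         # time embedding, projected to out_channels inside the block
params = block.init(jax.random.PRNGKey(0), hidden, temb)
out = block.apply(params, hidden, temb)
print(out.shape)  # (1, 8, 8, 64); the 1x1 shortcut conv projects the residual to 64 channels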
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
UpperCAmelCase__ = parser.parse_args()
    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]
print(F"N layers selected for distillation: {std_idx}")
print(F"Number of params transferred for distillation: {len(compressed_sd.keys())}")
print(F"Save transferred checkpoint to {args.dump_checkpoint}.")
torch.save(compressed_sd, args.dump_checkpoint)
| 40
| 0
|
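As a hedged follow-up sketch (module names assumed from the DistilBERT student architecture this script targets), the extracted checkpoint can be loaded into a 6-layer student to verify the key mapping:

import torch
from transformers import DistilBertConfig, DistilBertForMaskedLM

student = DistilBertForMaskedLM(DistilBertConfig(n_layers=6))
state = torch.load("serialization_dir/tf_bert-base-uncased_0247911.pth")
missing, unexpected = student.load_state_dict(state, strict=False)
print(f"missing: {len(missing)}, unexpected: {len(unexpected)}")  # both should be near zero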
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Tuple =logging.get_logger(__name__)
_UpperCAmelCase : str ={
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 262
|
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
_UpperCAmelCase : Dict ={
"""susnato/ernie-m-base_pytorch""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json""",
"""susnato/ernie-m-large_pytorch""": """https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json""",
}
class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
| 262
| 1
|
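A small usage sketch (hypothetical; assumes the reconstructed ErnieMConfig above): configs round-trip through plain dicts, and attribute_map lets `dropout` alias `classifier_dropout`.

config = ErnieMConfig(hidden_size=256, num_hidden_layers=4, classifier_dropout=0.1)
print(config.to_dict()["hidden_size"])  # 256
print(config.dropout)                   # 0.1, resolved through attribute_map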
import string


def atbash_slow(sequence: str) -> str:
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )


def benchmark() -> None:
    """Compare the runtime of the two implementations."""
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
| 352
|
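For illustration (using the fixed functions above): atbash maps A<->Z, B<->Y, and so on, which also makes the cipher its own inverse.

assert atbash("ABCDEFGH") == "ZYXWVUTS"
assert atbash("with space") == "drgs hkzxv"
assert atbash(atbash("testStringtest")) == "testStringtest"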
'''simple docstring'''
class CircularQueue:
    """Circular FIFO queue (ring buffer) with a fixed capacity of n elements."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
| 299
| 0
|
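Usage sketch for the queue above (illustrative): enqueue wraps around the fixed array, and dequeue pops in FIFO order.

q = CircularQueue(3)
q.enqueue(1).enqueue(2).enqueue(3)   # enqueue returns self, so calls chain
assert len(q) == 3 and q.first() == 1
assert q.dequeue() == 1
q.enqueue(4)                          # reuses the slot freed by the dequeue (wrap-around)
assert q.dequeue() == 2 and q.dequeue() == 3 and q.dequeue() == 4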
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int):
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
| 313
|
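A short usage sketch (illustrative) for the hybrid sort above: it runs quicksort with a median-of-3 pivot, falls back to heap sort once the depth budget of 2*ceil(log2(n)) is spent, and finishes slices below the size threshold of 16 with insertion sort.

data = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12]
assert sort(data) == sorted([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12])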
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
"""simple docstring"""
def __init__( self ) ->List[str]:
# test for the above condition
self.test()
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = False
while not completed:
if counter == 1:
self.reset()
SCREAMING_SNAKE_CASE : List[Any] = self.advance()
if not self.does_advance(_lowerCamelCase ):
raise Exception(
'''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self.update(_lowerCamelCase )
counter += 1
if counter > 1_0000:
raise Exception('''update() does not fulfill the constraint.''' )
if self.remaining() != 0:
raise Exception('''Custom Constraint is not defined correctly.''' )
@abstractmethod
def __lowerCAmelCase ( self ) ->Optional[int]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self ) ->Optional[Any]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self ) ->Union[str, Any]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->Any:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class PhrasalConstraint(Constraint):
"""simple docstring"""
def __init__( self , _lowerCamelCase ) ->int:
        super(PhrasalConstraint, self).__init__()
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or len(_lowerCamelCase ) == 0:
raise ValueError(F"""`token_ids` has to be a non-empty list, but is {token_ids}.""" )
if any((not isinstance(_lowerCamelCase , _lowerCamelCase ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F"""Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.""" )
SCREAMING_SNAKE_CASE : Optional[Any] = token_ids
SCREAMING_SNAKE_CASE : Union[str, Any] = len(self.token_ids )
SCREAMING_SNAKE_CASE : Any = -1 # the index of the currently fulfilled step
SCREAMING_SNAKE_CASE : Any = False
def __lowerCAmelCase ( self ) ->List[Any]:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Union[str, Any]:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[str]:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : List[Any] = False
if self.does_advance(_lowerCamelCase ):
self.fulfilled_idx += 1
SCREAMING_SNAKE_CASE : str = True
if self.fulfilled_idx == (self.seqlen - 1):
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : Union[str, Any] = completed
else:
# failed to make progress.
SCREAMING_SNAKE_CASE : Dict = True
self.reset()
return stepped, completed, reset
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
def __lowerCAmelCase ( self ) ->Any:
return self.seqlen - (self.fulfilled_idx + 1)
def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->Dict:
SCREAMING_SNAKE_CASE : Any = PhrasalConstraint(self.token_ids )
if stateful:
SCREAMING_SNAKE_CASE : Dict = self.seqlen
SCREAMING_SNAKE_CASE : int = self.fulfilled_idx
SCREAMING_SNAKE_CASE : Tuple = self.completed
return new_constraint
class DisjunctiveTrie:
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=True ) ->Dict:
SCREAMING_SNAKE_CASE : Any = max([len(_lowerCamelCase ) for one in nested_token_ids] )
SCREAMING_SNAKE_CASE : List[str] = {}
for token_ids in nested_token_ids:
SCREAMING_SNAKE_CASE : Optional[Any] = root
for tidx, token_id in enumerate(_lowerCamelCase ):
if token_id not in level:
SCREAMING_SNAKE_CASE : Any = {}
SCREAMING_SNAKE_CASE : Tuple = level[token_id]
if no_subsets and self.has_subsets(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(
'''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
F""" {nested_token_ids}.""" )
SCREAMING_SNAKE_CASE : List[Any] = root
def __lowerCAmelCase ( self , _lowerCamelCase ) ->int:
SCREAMING_SNAKE_CASE : List[Any] = self.trie
for current_token in current_seq:
SCREAMING_SNAKE_CASE : int = start[current_token]
SCREAMING_SNAKE_CASE : Optional[int] = list(start.keys() )
return next_tokens
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Dict:
SCREAMING_SNAKE_CASE : Any = self.next_tokens(_lowerCamelCase )
return len(_lowerCamelCase ) == 0
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Any = list(root.values() )
if len(_lowerCamelCase ) == 0:
return 1
else:
return sum([self.count_leaves(_lowerCamelCase ) for nn in next_nodes] )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Dict:
SCREAMING_SNAKE_CASE : List[str] = self.count_leaves(_lowerCamelCase )
return len(_lowerCamelCase ) != leaf_count
class DisjunctiveConstraint(Constraint):
"""simple docstring"""
def __init__( self , _lowerCamelCase ) ->str:
        super(DisjunctiveConstraint, self).__init__()
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or len(_lowerCamelCase ) == 0:
raise ValueError(F"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""" )
if any(not isinstance(_lowerCamelCase , _lowerCamelCase ) for token_ids in nested_token_ids ):
raise ValueError(F"""`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.""" )
if any(
any((not isinstance(_lowerCamelCase , _lowerCamelCase ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F"""Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.""" )
SCREAMING_SNAKE_CASE : List[Any] = DisjunctiveTrie(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = nested_token_ids
SCREAMING_SNAKE_CASE : Optional[int] = self.trie.max_height
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : Optional[int] = False
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : str = self.trie.next_tokens(self.current_seq )
if len(_lowerCamelCase ) == 0:
return None
else:
return token_list
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Dict:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
SCREAMING_SNAKE_CASE : List[str] = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Any:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Union[str, Any] = False
if self.does_advance(_lowerCamelCase ):
self.current_seq.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = True
else:
SCREAMING_SNAKE_CASE : Dict = True
self.reset()
SCREAMING_SNAKE_CASE : Any = self.trie.reached_leaf(self.current_seq )
SCREAMING_SNAKE_CASE : List[Any] = completed
return stepped, completed, reset
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : List[Any] = []
def __lowerCAmelCase ( self ) ->Optional[Any]:
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->List[str]:
SCREAMING_SNAKE_CASE : str = DisjunctiveConstraint(self.token_ids )
if stateful:
SCREAMING_SNAKE_CASE : str = self.seqlen
SCREAMING_SNAKE_CASE : int = self.current_seq
SCREAMING_SNAKE_CASE : Optional[int] = self.completed
return new_constraint
class ConstraintListState:
"""simple docstring"""
def __init__( self , _lowerCamelCase ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : List[Any] = constraints
# max # of steps required to fulfill a given constraint
SCREAMING_SNAKE_CASE : str = max([c.seqlen for c in constraints] )
SCREAMING_SNAKE_CASE : List[str] = len(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = False
self.init_state()
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : Any = []
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : Tuple = [constraint.copy(stateful=_lowerCamelCase ) for constraint in self.constraints]
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : str = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Tuple = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
SCREAMING_SNAKE_CASE : Optional[int] = constraint.advance()
if isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.append(_lowerCamelCase )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.extend(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : List[str] = self.inprogress_constraint.advance()
if isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.append(_lowerCamelCase )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.extend(_lowerCamelCase )
if len(_lowerCamelCase ) == 0:
return None
else:
return token_list
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Union[str, Any]:
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.add(_lowerCamelCase )
# the entire list of constraints are fulfilled
if self.completed:
break
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[Any]:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` should be an `int`, but is `{token_id}`.""" )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = False, False
if self.completed:
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : Optional[int] = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self.inprogress_constraint.update(_lowerCamelCase )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Optional[int] = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
SCREAMING_SNAKE_CASE : str = None
if len(self.pending_constraints ) == 0:
# we're done!
SCREAMING_SNAKE_CASE : Optional[Any] = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(_lowerCamelCase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = pending_constraint.update(_lowerCamelCase )
if not stepped:
raise Exception(
'''`constraint.update(token_id)` is not yielding incremental progress, '''
'''even though `constraint.does_advance(token_id)` is true.''' )
if complete:
self.complete_constraints.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = None
if not complete and stepped:
SCREAMING_SNAKE_CASE : Optional[Any] = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
SCREAMING_SNAKE_CASE : Union[str, Any] = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
SCREAMING_SNAKE_CASE : str = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def __lowerCAmelCase ( self , _lowerCamelCase=True ) ->str:
SCREAMING_SNAKE_CASE : Dict = ConstraintListState(self.constraints ) # we actually never though self.constraints objects
# throughout this process. So it's at initialization state.
if stateful:
SCREAMING_SNAKE_CASE : str = [
constraint.copy(stateful=_lowerCamelCase ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
SCREAMING_SNAKE_CASE : Optional[int] = self.inprogress_constraint.copy(stateful=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = [constraint.copy() for constraint in self.pending_constraints]
return new_state
| 313
| 1
|
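A hedged sketch of how these constraint classes are driven (class names are restored above, but the method bodies in this row still carry mangled assignments; the calls below follow the upstream transformers beam_constraints API): a PhrasalConstraint is stepped token by token until it reports completion.

constraint = PhrasalConstraint([5, 9, 2])
for token in [5, 9, 2]:
    assert constraint.does_advance(token)
    stepped, completed, reset = constraint.update(token)
assert completed and constraint.remaining() == 0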
"""simple docstring"""
def bin_to_octal(bin_string: str) -> str:
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
| 341
|
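Worked example (using the fixed function above): "1111" left-pads to "001111", which splits into "001" and "111", i.e. the octal digits 1 and 7.

assert bin_to_octal("1111") == "17"
assert bin_to_octal("101010") == "52"
assert oct(int("101010", 2))[2:] == bin_to_octal("101010")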
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
| 341
| 1
|
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowerCAmelCase__ = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
'''simple docstring'''
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
def _lowerCamelCase ( self) -> Any:
_A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size).clip(3 , self.vocab_size)
_A : int = np.expand_dims(np.array([self.eos_token_id] * self.batch_size) , 1)
_A : List[str] = np.concatenate([input_ids, eos_tensor] , axis=1)
_A : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_A : List[str] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_A : Dict = prepare_pegasus_inputs_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
return config, inputs_dict
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> List[Any]:
_A : List[str] = 2_0
_A : Optional[Any] = model_class_name(__lowerCamelCase)
_A : str = model.encode(inputs_dict["input_ids"])
_A , _A : List[Any] = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
_A : Tuple = model.init_cache(decoder_input_ids.shape[0] , __lowerCamelCase , __lowerCamelCase)
_A : List[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4")
_A : List[str] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_A : str = model.decode(
decoder_input_ids[:, :-1] , __lowerCamelCase , decoder_attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase , decoder_position_ids=__lowerCamelCase , )
_A : List[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4")
_A : Union[str, Any] = model.decode(
decoder_input_ids[:, -1:] , __lowerCamelCase , decoder_attention_mask=__lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__lowerCamelCase , )
_A : List[str] = model.decode(__lowerCamelCase , __lowerCamelCase)
_A : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3 , msg=F"Max diff is {diff}")
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[Any]:
_A : Union[str, Any] = 2_0
_A : Union[str, Any] = model_class_name(__lowerCamelCase)
_A : str = model.encode(inputs_dict["input_ids"])
_A , _A : str = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
_A : Optional[int] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
] , axis=-1 , )
_A : Tuple = model.init_cache(decoder_input_ids.shape[0] , __lowerCamelCase , __lowerCamelCase)
_A : Optional[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_A : Optional[Any] = model.decode(
decoder_input_ids[:, :-1] , __lowerCamelCase , decoder_attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase , decoder_position_ids=__lowerCamelCase , )
_A : Union[str, Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4")
_A : List[str] = model.decode(
decoder_input_ids[:, -1:] , __lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__lowerCamelCase , decoder_position_ids=__lowerCamelCase , )
_A : Optional[int] = model.decode(__lowerCamelCase , __lowerCamelCase , decoder_attention_mask=__lowerCamelCase)
_A : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3 , msg=F"Max diff is {diff}")
def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str=None , UpperCamelCase__ : Optional[int]=None , ):
if attention_mask is None:
_A : Tuple = np.not_equal(UpperCamelCase__ , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
_A : List[Any] = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
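# Worked example of the masking convention above, using NumPy only (the pad
# id of 0 is an assumption for illustration):
#
# >>> import numpy as np
# >>> ids = np.array([[5, 7, 0]])
# >>> np.not_equal(ids, 0).astype(np.int8)  # attention mask: 1 = real token
# array([[1, 1, 0]], dtype=int8)
#
# The decoder mask additionally forces position 0 to 1 (the np.ones column
# above) so the decoder start token is always attended to.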
@require_flax
class lowerCAmelCase__ ( a , unittest.TestCase):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
__SCREAMING_SNAKE_CASE = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : List[str] = FlaxPegasusModelTester(self)
_A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase)
def _lowerCamelCase ( self) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _lowerCamelCase ( self) -> str:
_A , _A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
def _lowerCamelCase ( self) -> Dict:
_A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
def _lowerCamelCase ( self) -> Dict:
_A , _A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
_A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
_A : Union[str, Any] = model_class(__lowerCamelCase)
@jax.jit
def encode_jitted(__lowerCamelCase , __lowerCamelCase=None , **__lowerCamelCase):
return model.encode(input_ids=__lowerCamelCase , attention_mask=__lowerCamelCase)
with self.subTest("JIT Enabled"):
_A : Any = encode_jitted(**__lowerCamelCase).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
_A : Union[str, Any] = encode_jitted(**__lowerCamelCase).to_tuple()
self.assertEqual(len(__lowerCamelCase) , len(__lowerCamelCase))
for jitted_output, output in zip(__lowerCamelCase , __lowerCamelCase):
self.assertEqual(jitted_output.shape , output.shape)
def _lowerCamelCase ( self) -> Dict:
_A , _A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
_A : int = model_class(__lowerCamelCase)
_A : Dict = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"])
_A : Union[str, Any] = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase):
return model.decode(
decoder_input_ids=__lowerCamelCase , decoder_attention_mask=__lowerCamelCase , encoder_outputs=__lowerCamelCase , )
with self.subTest("JIT Enabled"):
_A : List[str] = decode_jitted(**__lowerCamelCase).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
_A : Optional[int] = decode_jitted(**__lowerCamelCase).to_tuple()
self.assertEqual(len(__lowerCamelCase) , len(__lowerCamelCase))
for jitted_output, output in zip(__lowerCamelCase , __lowerCamelCase):
self.assertEqual(jitted_output.shape , output.shape)
@slow
def _lowerCamelCase ( self) -> List[str]:
for model_class_name in self.all_model_classes:
_A : Tuple = model_class_name.from_pretrained("google/pegasus-large" , from_pt=__lowerCamelCase)
_A : Union[str, Any] = np.ones((1, 1))
_A : Optional[int] = model(__lowerCamelCase)
self.assertIsNotNone(__lowerCamelCase)
@slow
def _lowerCamelCase ( self) -> Tuple:
_A : Optional[Any] = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
_A : str = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
_A : Any = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
_A : List[Any] = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
_A : Any = tokenizer(__lowerCamelCase , return_tensors="np" , truncation=__lowerCamelCase , max_length=5_1_2 , padding=__lowerCamelCase)
_A : List[Any] = model.generate(**__lowerCamelCase , num_beams=2).sequences
_A : Optional[Any] = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase)
assert tgt_text == decoded
| 11
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowerCAmelCase__ = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
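# Usage sketch of the _LazyModule pattern above: importing the package is
# cheap because the torch/flax submodules are only loaded on first attribute
# access. The import path below is the conventional one and is an assumption
# here:
#
# >>> from transformers.models.speech_encoder_decoder import SpeechEncoderDecoderModel
# >>> # torch is imported at this point, not when the package itself loads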
| 11
| 1
|
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
SCREAMING_SNAKE_CASE_:str = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self, lowerCamelCase__ ):
super().__init__()
A : Optional[int] = torchvision.models.resnet152(pretrained=lowerCamelCase__ )  # resnet152 backbone, as in the original MMBT utils
A : List[Any] = list(model.children() )[:-2]
A : Tuple = nn.Sequential(*lowerCamelCase__ )
A : Dict = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds] )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
# Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
A : Dict = self.pool(self.model(lowerCamelCase__ ) )
A : List[str] = torch.flatten(lowerCamelCase__, start_dim=2 )
A : str = out.transpose(1, 2 ).contiguous()
return out # BxNx2048
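# Shape sketch for the encoder above with num_image_embeds = 9, i.e. a (3, 3)
# pooling grid from POOLING_BREAKDOWN:
# (B, 3, 224, 224) --resnet--> (B, 2048, 7, 7) --pool--> (B, 2048, 3, 3)
# --flatten/transpose--> (B, 9, 2048)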
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : Optional[Any] = [json.loads(lowerCamelCase__ ) for l in open(lowerCamelCase__ )]
A : str = os.path.dirname(lowerCamelCase__ )
A : List[str] = tokenizer
A : Any = labels
A : Optional[int] = len(lowerCamelCase__ )
A : str = max_seq_length
A : List[Any] = transforms
def __len__( self ):
return len(self.data )
def __getitem__( self, lowerCamelCase__ ):
A : Any = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""], add_special_tokens=lowerCamelCase__ ) )
A , A , A : str = sentence[0], sentence[1:-1], sentence[-1]  # start_token, sentence, end_token
A : Union[str, Any] = sentence[: self.max_seq_length]
A : str = torch.zeros(self.n_classes )
label[[self.labels.index(tgt ) for tgt in self.data[index]["""label"""]]] = 1
A : Optional[Any] = Image.open(os.path.join(self.data_dir, self.data[index]["""img"""] ) ).convert("""RGB""" )
A : Tuple = self.transforms(lowerCamelCase__ )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def _lowerCAmelCase ( self ):
A : Any = Counter()
for row in self.data:
label_freqs.update(row["""label"""] )
return label_freqs
def __UpperCamelCase ( _lowerCAmelCase ) -> List[str]:
"""simple docstring"""
A : int = [len(row["""sentence"""] ) for row in batch]
A , A : Dict = len(_lowerCAmelCase ), max(_lowerCAmelCase )  # bsz, max_seq_len
A : Dict = torch.zeros(_lowerCAmelCase , _lowerCAmelCase , dtype=torch.long )
A : Optional[Any] = torch.zeros(_lowerCAmelCase , _lowerCAmelCase , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(_lowerCAmelCase , _lowerCAmelCase ) ):
text_tensor[i_batch, :length] = input_row["""sentence"""]
mask_tensor[i_batch, :length] = 1
A : Optional[int] = torch.stack([row["""image"""] for row in batch] )
A : List[Any] = torch.stack([row["""label"""] for row in batch] )
A : List[str] = torch.stack([row["""image_start_token"""] for row in batch] )
A : int = torch.stack([row["""image_end_token"""] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def __UpperCamelCase ( ) -> List[Any]:
"""simple docstring"""
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def __UpperCamelCase ( ) -> Dict:
"""simple docstring"""
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ),
] )
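# Usage sketch (class and function names follow the original MMBT example and
# are assumptions here): the dataset above feeds a DataLoader through the
# collate function, which pads text to the longest sentence in the batch and
# stacks the image tensors.
#
# >>> dataset = JsonlDataset(data_path, tokenizer, transforms, labels, max_seq_length)
# >>> loader = torch.utils.data.DataLoader(dataset, batch_size=8, collate_fn=collate_fn)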
| 353
|
from random import randint, random
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = 5 , ) -> list:
"""simple docstring"""
A : Any = [[-1] * number_of_cells] # Create a highway without any car
A : Tuple = 0
A : Dict = max(_lowerCAmelCase , 0 )
while i < number_of_cells:
A : Any = (
randint(0 , _lowerCAmelCase ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> int:
"""simple docstring"""
A : List[str] = 0
A : Dict = highway_now[car_index + 1 :]
for cell in range(len(_lowerCAmelCase ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(_lowerCAmelCase , -1 )
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> list:
"""simple docstring"""
A : str = len(_lowerCAmelCase )
# Before calculations, the highway is empty
A : Any = [-1] * number_of_cells
for car_index in range(_lowerCAmelCase ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
A : str = min(highway_now[car_index] + 1 , _lowerCAmelCase )
# Number of empty cells before the next car
A : Optional[int] = get_distance(_lowerCAmelCase , _lowerCAmelCase ) - 1
# We can't have the car causing an accident
A : Any = min(next_highway[car_index] , _lowerCAmelCase )
if random() < probability:
# Randomly, a driver will slow down
A : Optional[Any] = max(next_highway[car_index] - 1 , 0 )
return next_highway
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> list:
"""simple docstring"""
A : Any = len(highway[0] )
for i in range(_lowerCAmelCase ):
A : Optional[int] = update(highway[i] , _lowerCAmelCase , _lowerCAmelCase )
A : Tuple = [-1] * number_of_cells
for car_index in range(_lowerCAmelCase ):
A : Dict = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
A : Optional[int] = (car_index + speed) % number_of_cells
# Commit the change of position
A : Dict = speed
highway.append(_lowerCAmelCase )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
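# Usage sketch. All four functions above were renamed to the same placeholder
# by the dump; in the original Nagel-Schreckenberg implementation they are
# construct_highway, get_distance, update and simulate (get_distance and
# update do survive in the call sites above), which this example assumes:
#
# >>> highway = construct_highway(number_of_cells=100, frequency=5, initial_speed=3)
# >>> history = simulate(highway, number_of_update=20, probability=0.1, max_speed=5)
# >>> len(history)  # initial state + 20 updates
# 21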
| 115
| 0
|
"""simple docstring"""
__SCREAMING_SNAKE_CASE ={}
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ):
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
lowercase_ : Any = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
lowercase_ : Optional[int] = _calculate(days - 1 , __SCREAMING_SNAKE_CASE , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
lowercase_ : Any = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
lowercase_ : Dict = _calculate(days - 1 , __SCREAMING_SNAKE_CASE , 0 )
lowercase_ : str = state_late + state_absent + state_ontime
cache[key] = prizestrings
return prizestrings
def lowercase__( __SCREAMING_SNAKE_CASE : int = 30 ):
return _calculate(__SCREAMING_SNAKE_CASE , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
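# Worked example from the Project Euler 191 statement (the entry point above
# was originally named `solution`; this assumes that name): over a 4-day
# period there are exactly 43 prize strings.
#
# >>> solution(4)
# 43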
| 213
|
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class UpperCamelCase ( yaml.SafeLoader ):
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ : str = [self.constructed_objects[key_node] for key_node, _ in node.value]
lowercase_ : str = [tuple(__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else key for key in keys]
lowercase_ : List[Any] = Counter(__UpperCamelCase )
lowercase_ : str = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(f'''Got duplicate yaml keys: {duplicate_keys}''' )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=False ) -> List[Any]:
'''simple docstring'''
lowercase_ : Optional[int] = super().construct_mapping(__UpperCamelCase ,deep=__UpperCamelCase )
self._check_no_duplicates_on_constructed_node(__UpperCamelCase )
return mapping
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
lowercase_ : Tuple = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
lowercase_ : Dict = full_content[1:].index('---' ) + 1
lowercase_ : Optional[int] = '\n'.join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(__SCREAMING_SNAKE_CASE )
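# Worked example of the front-matter split above (the helper's original name,
# _split_yaml_from_readme, survives in the call sites further down):
#
# >>> _split_yaml_from_readme("---\nlicense: mit\n---\n# Title")
# ('license: mit', '# Title')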
class UpperCamelCase ( lowercase_ ):
# class attributes
lowercase = {'train_eval_index'} # train-eval-index in the YAML metadata
@classmethod
def _UpperCAmelCase ( cls ,__UpperCamelCase ) -> "DatasetMetadata":
'''simple docstring'''
with open(__UpperCamelCase ,encoding='utf-8' ) as readme_file:
lowercase_ , lowercase_ : Optional[int] = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(__UpperCamelCase )
else:
return cls()
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
if path.exists():
with open(__UpperCamelCase ,encoding='utf-8' ) as readme_file:
lowercase_ : Dict = readme_file.read()
else:
lowercase_ : int = None
lowercase_ : Any = self._to_readme(__UpperCamelCase )
with open(__UpperCamelCase ,'w' ,encoding='utf-8' ) as readme_file:
readme_file.write(__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase = None ) -> str:
'''simple docstring'''
if readme_content is not None:
lowercase_ , lowercase_ : Optional[Any] = _split_yaml_from_readme(__UpperCamelCase )
lowercase_ : Optional[Any] = '---\n' + self.to_yaml_string() + '---\n' + content
else:
lowercase_ : Tuple = '---\n' + self.to_yaml_string() + '---\n'
return full_content
@classmethod
def _UpperCAmelCase ( cls ,__UpperCamelCase ) -> "DatasetMetadata":
'''simple docstring'''
lowercase_ : List[str] = yaml.load(__UpperCamelCase ,Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
lowercase_ : Dict = {
(key.replace('-' ,'_' ) if key.replace('-' ,'_' ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**__UpperCamelCase )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
return yaml.safe_dump(
{
(key.replace('_' ,'-' ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} ,sort_keys=__UpperCamelCase ,allow_unicode=__UpperCamelCase ,encoding='utf-8' ,).decode('utf-8' )
__SCREAMING_SNAKE_CASE ={
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
__SCREAMING_SNAKE_CASE =ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
ap.add_argument("readme_filepath")
__SCREAMING_SNAKE_CASE =ap.parse_args()
__SCREAMING_SNAKE_CASE =Path(args.readme_filepath)
__SCREAMING_SNAKE_CASE =DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 213
| 1
|
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
A__ : Optional[int] ='''pt'''
elif is_tf_available():
A__ : List[str] ='''tf'''
else:
A__ : str ='''jax'''
class UpperCAmelCase ( snake_case_ , unittest.TestCase ):
_lowercase: int = ByT5Tokenizer
_lowercase: Union[str, Any] = False
def lowercase__ ( self : Union[str, Any] ) -> Tuple:
super().setUp()
_lowerCAmelCase = ByT5Tokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase__ ( self : Optional[int] ) -> Any:
return ByT5Tokenizer.from_pretrained("""google/byt5-small""" )
def lowercase__ ( self : Dict , **__snake_case : Optional[Any] ) -> ByT5Tokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def lowercase__ ( self : Optional[int] , __snake_case : List[Any] , __snake_case : Tuple=False , __snake_case : Any=20 , __snake_case : List[str]=5 ) -> Tuple[str, list]:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for ByT5 because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
_lowerCAmelCase = []
for i in range(len(__snake_case ) ):
try:
_lowerCAmelCase = tokenizer.decode([i] , clean_up_tokenization_spaces=__snake_case )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_lowerCAmelCase = list(filter(lambda __snake_case : re.match(R"""^[ a-zA-Z]+$""" , t[1] ) , __snake_case ) )
_lowerCAmelCase = list(filter(lambda __snake_case : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__snake_case ) , __snake_case ) )
if max_length is not None and len(__snake_case ) > max_length:
_lowerCAmelCase = toks[:max_length]
if min_length is not None and len(__snake_case ) < min_length and len(__snake_case ) > 0:
while len(__snake_case ) < min_length:
_lowerCAmelCase = toks + toks
# toks_str = [t[1] for t in toks]
_lowerCAmelCase = [t[0] for t in toks]
# Ensure consistency
_lowerCAmelCase = tokenizer.decode(__snake_case , clean_up_tokenization_spaces=__snake_case )
if " " not in output_txt and len(__snake_case ) > 1:
_lowerCAmelCase = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__snake_case )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__snake_case )
)
if with_prefix_space:
_lowerCAmelCase = """ """ + output_txt
_lowerCAmelCase = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
return output_txt, output_ids
def lowercase__ ( self : List[str] ) -> Optional[int]:
_lowerCAmelCase = self.ta_base_tokenizer
_lowerCAmelCase = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] )
_lowerCAmelCase = tokenizer(["""hi""", """I went to the gym""", """"""] )
self.assertListEqual(batch_with_eos_added["""input_ids"""] , batch_without_eos_added["""input_ids"""] )
def lowercase__ ( self : Any ) -> Tuple:
_lowerCAmelCase = self.ta_base_tokenizer
_lowerCAmelCase = """Unicode €."""
_lowerCAmelCase = tokenizer(__snake_case )
_lowerCAmelCase = [88, 1_13, 1_08, 1_02, 1_14, 1_03, 1_04, 35, 2_29, 1_33, 1_75, 49, 1]
self.assertEqual(encoded["""input_ids"""] , __snake_case )
# decoding
_lowerCAmelCase = tokenizer.decode(__snake_case )
self.assertEqual(__snake_case , """Unicode €.</s>""" )
_lowerCAmelCase = tokenizer("""e è é ê ë""" )
_lowerCAmelCase = [1_04, 35, 1_98, 1_71, 35, 1_98, 1_72, 35, 1_98, 1_73, 35, 1_98, 1_74, 1]
self.assertEqual(encoded["""input_ids"""] , __snake_case )
# decoding
_lowerCAmelCase = tokenizer.decode(__snake_case )
self.assertEqual(__snake_case , """e è é ê ë</s>""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """e è é ê ë</s>""" )
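# The ids above follow ByT5's byte + offset scheme: each UTF-8 byte is
# shifted by the 3 special tokens (pad=0, eos=1, unk=2). For example
# 'U' = 0x55 = 85 -> 88, ' ' = 32 -> 35, and the euro sign's three UTF-8
# bytes 0xE2 0x82 0xAC (226, 130, 172) -> 229, 133, 175, exactly the values
# in the expected encoding checked above.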
def lowercase__ ( self : List[Any] ) -> Dict:
_lowerCAmelCase = self.ta_base_tokenizer
_lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
_lowerCAmelCase = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 1, 0]
# fmt: on
_lowerCAmelCase = tokenizer(__snake_case , padding=__snake_case , return_tensors=__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
if FRAMEWORK != "jax":
_lowerCAmelCase = list(batch.input_ids.numpy()[0] )
else:
_lowerCAmelCase = list(batch.input_ids.tolist()[0] )
self.assertListEqual(__snake_case , __snake_case )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def lowercase__ ( self : Any ) -> Optional[Any]:
_lowerCAmelCase = self.ta_base_tokenizer
_lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_lowerCAmelCase = tokenizer(__snake_case , padding=__snake_case , return_tensors=__snake_case )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""" , __snake_case )
self.assertIn("""attention_mask""" , __snake_case )
self.assertNotIn("""decoder_input_ids""" , __snake_case )
self.assertNotIn("""decoder_attention_mask""" , __snake_case )
def lowercase__ ( self : str ) -> Dict:
_lowerCAmelCase = self.ta_base_tokenizer
_lowerCAmelCase = [
"""Summary of the text.""",
"""Another summary.""",
]
_lowerCAmelCase = tokenizer(
text_target=__snake_case , max_length=32 , padding="""max_length""" , truncation=__snake_case , return_tensors=__snake_case )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def lowercase__ ( self : List[Any] ) -> Any:
_lowerCAmelCase = self.ta_base_tokenizer
_lowerCAmelCase = ["""A long paragraph for summarization. </s>"""]
_lowerCAmelCase = ["""Summary of the text. </s>"""]
# fmt: off
_lowerCAmelCase = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 35, 1]
_lowerCAmelCase = [86, 1_20, 1_12, 1_12, 1_00, 1_17, 1_24, 35, 1_14, 1_05, 35, 1_19, 1_07, 1_04, 35, 1_19, 1_04, 1_23, 1_19, 49, 35, 1]
# fmt: on
_lowerCAmelCase = tokenizer(__snake_case , text_target=__snake_case )
self.assertEqual(__snake_case , batch["""input_ids"""][0] )
self.assertEqual(__snake_case , batch["""labels"""][0] )
def lowercase__ ( self : Optional[int] ) -> Tuple:
# safety check on max_len default value so we are sure the test works
_lowerCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_lowerCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
_lowerCAmelCase = tempfile.mkdtemp()
_lowerCAmelCase = """ He is very happy, UNwant\u00E9d,running"""
_lowerCAmelCase = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
tokenizer.save_pretrained(__snake_case )
_lowerCAmelCase = tokenizer.__class__.from_pretrained(__snake_case )
_lowerCAmelCase = after_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
shutil.rmtree(__snake_case )
_lowerCAmelCase = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
_lowerCAmelCase = tempfile.mkdtemp()
_lowerCAmelCase = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
_lowerCAmelCase = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
_lowerCAmelCase = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
tokenizer.save_pretrained(__snake_case )
_lowerCAmelCase = tokenizer.__class__.from_pretrained(__snake_case )
_lowerCAmelCase = after_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_lowerCAmelCase = tokenizer.__class__.from_pretrained(__snake_case , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__snake_case )
def lowercase__ ( self : Union[str, Any] ) -> Dict:
_lowerCAmelCase = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__snake_case )
with open(os.path.join(__snake_case , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
_lowerCAmelCase = json.load(__snake_case )
with open(os.path.join(__snake_case , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
_lowerCAmelCase = json.load(__snake_case )
_lowerCAmelCase = [f"<extra_id_{i}>" for i in range(1_25 )]
_lowerCAmelCase = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
_lowerCAmelCase = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(__snake_case , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__snake_case , __snake_case )
with open(os.path.join(__snake_case , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__snake_case , __snake_case )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_lowerCAmelCase = tokenizer_class.from_pretrained(
__snake_case , )
self.assertIn(
"""an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_lowerCAmelCase = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=__snake_case )]
_lowerCAmelCase = tokenizer_class.from_pretrained(
__snake_case , additional_special_tokens=__snake_case , )
self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , )
def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]:
_lowerCAmelCase = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__snake_case )
_lowerCAmelCase = tokenizer_class.from_pretrained(__snake_case )
self.assertTrue(tokenizer.decode([2_55] ) == """""" )
def lowercase__ ( self : List[Any] ) -> List[Any]:
pass
def lowercase__ ( self : Union[str, Any] ) -> str:
pass
def lowercase__ ( self : Union[str, Any] ) -> Dict:
pass
def lowercase__ ( self : Tuple ) -> List[Any]:
pass
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
# The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
# and special added tokens as tokens
_lowerCAmelCase = self.get_tokenizers(fast=__snake_case , do_lower_case=__snake_case )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
_lowerCAmelCase = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
_lowerCAmelCase = tokenizer.convert_tokens_to_string(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowercase__ ( self : List[Any] ) -> Dict:
_lowerCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
_lowerCAmelCase = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
_lowerCAmelCase = 0
_lowerCAmelCase = tokenizer.convert_ids_to_tokens(
__snake_case , skip_special_tokens=__snake_case )
for attr in attributes_list:
setattr(__snake_case , attr + """_id""" , __snake_case )
self.assertEqual(getattr(__snake_case , __snake_case ) , __snake_case )
self.assertEqual(getattr(__snake_case , attr + """_id""" ) , __snake_case )
setattr(__snake_case , attr + """_id""" , __snake_case )
self.assertEqual(getattr(__snake_case , __snake_case ) , __snake_case )
self.assertEqual(getattr(__snake_case , attr + """_id""" ) , __snake_case )
setattr(__snake_case , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(__snake_case , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(__snake_case , """additional_special_tokens_ids""" ) , [] )
setattr(__snake_case , """additional_special_tokens_ids""" , [token_id_to_test_setters] )
self.assertListEqual(getattr(__snake_case , """additional_special_tokens""" ) , [token_to_test_setters] )
self.assertListEqual(getattr(__snake_case , """additional_special_tokens_ids""" ) , [token_id_to_test_setters] )
| 220
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(snake_case_ ) , '''Tatoeba directory does not exist.''' )
class UpperCAmelCase ( unittest.TestCase ):
@cached_property
def lowercase__ ( self : int ) -> Any:
_lowerCAmelCase = tempfile.mkdtemp()
return TatoebaConverter(save_dir=__snake_case )
@slow
def lowercase__ ( self : Dict ) -> int:
self.resolver.convert_models(["""heb-eng"""] )
@slow
def lowercase__ ( self : Optional[int] ) -> Tuple:
_lowerCAmelCase , _lowerCAmelCase = self.resolver.write_model_card("""opus-mt-he-en""" , dry_run=__snake_case )
assert mmeta["long_pair"] == "heb-eng"
| 220
| 1
|
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
'''simple docstring'''
__lowercase : Union[str, Any] = PegasusTokenizer
__lowercase : int = PegasusTokenizerFast
__lowercase : str = True
__lowercase : List[Any] = True
def UpperCAmelCase_ ( self ) -> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ : Optional[Any] = PegasusTokenizer(__UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase_ ( self ) -> Tuple:
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def UpperCAmelCase_ ( self ,**__UpperCAmelCase ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname ,**__UpperCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> List[Any]:
return ("This is a test", "This is a test")
def UpperCAmelCase_ ( self ) -> Optional[Any]:
lowerCAmelCase__ : str = """</s>"""
lowerCAmelCase__ : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) ,__UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) ,__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> int:
lowerCAmelCase__ : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"""<pad>""" )
self.assertEqual(vocab_keys[1] ,"""</s>""" )
self.assertEqual(vocab_keys[-1] ,"""v""" )
self.assertEqual(len(__UpperCAmelCase ) ,1103 )
def UpperCAmelCase_ ( self ) -> List[str]:
self.assertEqual(self.get_tokenizer().vocab_size ,1103 )
def UpperCAmelCase_ ( self ) -> Dict:
lowerCAmelCase__ : List[str] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ : Union[str, Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ : List[Any] = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
lowerCAmelCase__ : List[str] = rust_tokenizer([raw_input_str] ,return_tensors=__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ).input_ids[0]
lowerCAmelCase__ : str = py_tokenizer([raw_input_str] ,return_tensors=__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ).input_ids[0]
self.assertListEqual(__UpperCAmelCase ,__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> List[Any]:
lowerCAmelCase__ : List[str] = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowerCAmelCase__ : Union[str, Any] = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
lowerCAmelCase__ : Tuple = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
lowerCAmelCase__ : Union[str, Any] = tokenizer([raw_input_str] ,return_tensors=__UpperCAmelCase ).input_ids[0]
self.assertListEqual(__UpperCAmelCase ,__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> int:
lowerCAmelCase__ : Any = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
lowerCAmelCase__ : Any = """To ensure a smooth flow of bank resolutions."""
lowerCAmelCase__ : str = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
lowerCAmelCase__ : Optional[Any] = tokenizer([raw_input_str] ,return_tensors=__UpperCAmelCase ).input_ids[0]
self.assertListEqual(__UpperCAmelCase ,__UpperCAmelCase )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
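# Cross-checking the two encodings above: the masked variant differs from the
# plain one only in that id 2 (<mask_1>) is prepended and id 3 (<mask_2>)
# replaces 2291 ("smooth"); the shared tail 1971, 113, 1679, 10710, 107, 1 is
# identical, consistent with the convert_ids_to_tokens assert that ids 2 and
# 3 are the mask tokens.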
@require_torch
def UpperCAmelCase_ ( self ) -> Optional[Any]:
lowerCAmelCase__ : str = ["""This is going to be way too long.""" * 150, """short example"""]
lowerCAmelCase__ : List[Any] = ["""not super long but more than 5 tokens""", """tiny"""]
lowerCAmelCase__ : str = self._large_tokenizer(__UpperCAmelCase ,padding=__UpperCAmelCase ,truncation=__UpperCAmelCase ,return_tensors="""pt""" )
lowerCAmelCase__ : Dict = self._large_tokenizer(
text_target=__UpperCAmelCase ,max_length=5 ,padding=__UpperCAmelCase ,truncation=__UpperCAmelCase ,return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(__UpperCAmelCase ) == 2 # input_ids, attention_mask.
@slow
def UpperCAmelCase_ ( self ) -> str:
# fmt: off
lowerCAmelCase__ : Any = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase ,model_name="""google/bigbird-pegasus-large-arxiv""" ,revision="""ba85d0851d708441f91440d509690f1ab6353415""" ,)
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
'''simple docstring'''
__lowercase : Union[str, Any] = PegasusTokenizer
__lowercase : List[Any] = PegasusTokenizerFast
__lowercase : Union[str, Any] = True
__lowercase : Optional[int] = True
def UpperCAmelCase_ ( self ) -> Optional[int]:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ : Union[str, Any] = PegasusTokenizer(__UpperCAmelCase ,offset=0 ,mask_token_sent=__UpperCAmelCase ,mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase_ ( self ) -> List[Any]:
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def UpperCAmelCase_ ( self ,**__UpperCAmelCase ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname ,**__UpperCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> int:
return ("This is a test", "This is a test")
def UpperCAmelCase_ ( self ) -> Dict:
lowerCAmelCase__ : Dict = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase__ : int = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
lowerCAmelCase__ : str = rust_tokenizer([raw_input_str] ,return_tensors=__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ).input_ids[0]
lowerCAmelCase__ : Any = py_tokenizer([raw_input_str] ,return_tensors=__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ).input_ids[0]
self.assertListEqual(__UpperCAmelCase ,__UpperCAmelCase )
@require_torch
def UpperCAmelCase_ ( self ) -> str:
lowerCAmelCase__ : Tuple = ["""This is going to be way too long.""" * 1000, """short example"""]
lowerCAmelCase__ : str = ["""not super long but more than 5 tokens""", """tiny"""]
lowerCAmelCase__ : Optional[int] = self._large_tokenizer(__UpperCAmelCase ,padding=__UpperCAmelCase ,truncation=__UpperCAmelCase ,return_tensors="""pt""" )
lowerCAmelCase__ : Optional[Any] = self._large_tokenizer(
text_target=__UpperCAmelCase ,max_length=5 ,padding=__UpperCAmelCase ,truncation=__UpperCAmelCase ,return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(__UpperCAmelCase ) == 2 # input_ids, attention_mask.
def UpperCAmelCase_ ( self ) -> str:
lowerCAmelCase__ : Any = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
lowerCAmelCase__ : Dict = self._large_tokenizer(__UpperCAmelCase ).input_ids
self.assertListEqual(
__UpperCAmelCase ,[182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] ,)
| 37
|
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = analyze_text(UpperCamelCase )
lowerCAmelCase__ : Optional[int] = list(""" """ + ascii_lowercase )
# what is our total sum of probabilities.
lowerCAmelCase__ : List[Any] = sum(single_char_strings.values() )
# one length string
lowerCAmelCase__ : Optional[int] = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
lowerCAmelCase__ : List[Any] = single_char_strings[ch]
lowerCAmelCase__ : List[Any] = my_str / all_sum
my_fir_sum += prob * math.log2(prob ) # entropy formula.
# print entropy
print(f"""{round(-1 * my_fir_sum ):.1f}""" )
# two len string
lowerCAmelCase__ : Dict = sum(two_char_strings.values() )
lowerCAmelCase__ : int = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
for chb in my_alphas:  # inner loop variable must differ from the outer one
lowerCAmelCase__ : Union[str, Any] = cha + chb
if sequence in two_char_strings:
lowerCAmelCase__ : Dict = two_char_strings[sequence]
lowerCAmelCase__ : Tuple = int(two_char_strings[sequence] ) / all_sum
my_sec_sum += prob * math.log2(prob )
# print second entropy
print(f"""{round(-1 * my_sec_sum ):.1f}""" )
# print the difference between them
print(f"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = Counter() # type: ignore
lowerCAmelCase__ : Tuple = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(UpperCamelCase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
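# Worked example: for the text "ab", analyze_text returns single-character
# counts {'b': 1, 'a': 1} (the last character is counted first, then indices
# 0..n-2) and two-character counts {' a': 1, 'ab': 1} (a space is prepended
# for the start-of-text pair). With two equiprobable characters the
# first-order entropy printed above is -(0.5*log2(0.5) + 0.5*log2(0.5)) = 1.0
# bit per character.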
def _SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 37
| 1
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : str = 384
UpperCAmelCase_ : Optional[int] = 7
if "tiny" in model_name:
UpperCAmelCase_ : Dict = 96
UpperCAmelCase_ : Any = (2, 2, 6, 2)
UpperCAmelCase_ : str = (3, 6, 12, 24)
elif "small" in model_name:
UpperCAmelCase_ : Optional[int] = 96
UpperCAmelCase_ : Union[str, Any] = (2, 2, 18, 2)
UpperCAmelCase_ : Any = (3, 6, 12, 24)
elif "base" in model_name:
UpperCAmelCase_ : List[Any] = 128
UpperCAmelCase_ : List[Any] = (2, 2, 18, 2)
UpperCAmelCase_ : Optional[Any] = (4, 8, 16, 32)
UpperCAmelCase_ : Optional[Any] = 12
UpperCAmelCase_ : int = 512
elif "large" in model_name:
UpperCAmelCase_ : List[Any] = 192
UpperCAmelCase_ : int = (2, 2, 18, 2)
UpperCAmelCase_ : List[str] = (6, 12, 24, 48)
UpperCAmelCase_ : Tuple = 12
UpperCAmelCase_ : int = 768
# set label information
UpperCAmelCase_ : Dict = 150
UpperCAmelCase_ : Dict = '''huggingface/label-files'''
UpperCAmelCase_ : str = '''ade20k-id2label.json'''
UpperCAmelCase_ : List[str] = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase_ : Any = {int(k ): v for k, v in idalabel.items()}
UpperCAmelCase_ : List[str] = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ : List[Any] = SwinConfig(
embed_dim=_lowercase , depths=_lowercase , num_heads=_lowercase , window_size=_lowercase , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
UpperCAmelCase_ : str = UperNetConfig(
backbone_config=_lowercase , auxiliary_in_channels=_lowercase , num_labels=_lowercase , id2label=idalabel , label2id=labelaid , )
return config
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = []
# fmt: off
# stem
rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Any = dct.pop(_lowercase )
UpperCAmelCase_ : Union[str, Any] = val
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
UpperCAmelCase_ : Optional[int] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
UpperCAmelCase_ : List[str] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
UpperCAmelCase_ : Any = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ : int = in_proj_weight[:dim, :]
UpperCAmelCase_ : Optional[int] = in_proj_bias[: dim]
UpperCAmelCase_ : int = in_proj_weight[
dim : dim * 2, :
]
UpperCAmelCase_ : str = in_proj_bias[
dim : dim * 2
]
UpperCAmelCase_ : Any = in_proj_weight[
-dim :, :
]
UpperCAmelCase_ : int = in_proj_bias[-dim :]
# fmt: on
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_, UpperCAmelCase_ : str = x.shape
UpperCAmelCase_ : Dict = x.reshape(_lowercase , 4 , in_channel // 4 )
UpperCAmelCase_ : Dict = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(_lowercase , _lowercase )
return x
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_, UpperCAmelCase_ : int = x.shape
UpperCAmelCase_ : Optional[int] = x.reshape(_lowercase , in_channel // 4 , 4 )
UpperCAmelCase_ : Dict = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(_lowercase , _lowercase )
return x
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : str = x.shape[0]
UpperCAmelCase_ : Optional[int] = x.reshape(4 , in_channel // 4 )
UpperCAmelCase_ : Dict = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(_lowercase )
return x
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = x.shape[0]
UpperCAmelCase_ : int = x.reshape(in_channel // 4 , 4 )
UpperCAmelCase_ : Any = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(_lowercase )
return x
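# Worked example of the column reshuffle performed by the first of the four
# helpers above (they correspond to the correct/reverse unfold-order fixes
# named at the call sites below):
#
# >>> import torch
# >>> x = torch.arange(8).reshape(1, 8)
# >>> x.reshape(1, 4, 2)[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(1, 8)
# tensor([[0, 4, 2, 6, 1, 5, 3, 7]])
#
# i.e. the four groups of in_channel // 4 columns are re-interleaved to undo
# the patch-merging unfold order of the original Swin checkpoint.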


def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-swin-tiny',
type=str,
choices=[F"""upernet-swin-{size}""" for size in ['tiny', 'small', 'base', 'large']],
help='Name of the Swin + UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
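
# Example usage (an illustrative invocation; the script filename and the output
# path are placeholders, not fixed anywhere in this file):
#
#   python convert_swin_upernet_to_pytorch.py \
#       --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny
#
# A minimal sketch of loading the converted checkpoint afterwards, assuming the
# conversion succeeded and the same folder path is used:
#
#   from transformers import UperNetForSemanticSegmentation
#   model = UperNetForSemanticSegmentation.from_pretrained("./upernet-swin-tiny")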


import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast


@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ["pixel_values", "input_ids", "attention_mask"]
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
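
# To run this test class locally, something like the following should work (the
# file path is an assumption about where this test would live in the
# transformers repo, not stated in this file):
#
#   python -m pytest tests/models/blip_2/test_processor_blip_2.py -k Blip2ProcessorTest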