| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 81–54k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
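Each row of the table was flattened by the page export: a `code` sample is followed by its `code_codestyle` value, then the paired `style_context` sample, then the `style_context_codestyle` and `label` values. The per-row markers below follow that reading of the header.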
def compute_ap(graph: dict):
    n = len(graph)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)


# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
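For the adjacency list above, removing vertex 2, 3 or 5 disconnects the graph, so the script should print 2, 3 and 5; this expected output is my own check by tracing the DFS, not part of the original snippet.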
(code_codestyle = 711)
import numpy


class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array
        # Random initial weights are assigned where the first argument is the
        # number of nodes in the previous layer and the second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is used to represent the number of nodes in
        # the input layer. The first hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )
        # Random initial values for the first hidden layer.
        # The first hidden layer has 4 nodes, the second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # Random initial values for the second hidden layer.
        # The second hidden layer has 3 nodes, the output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # predicted_output initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # the second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))


def example() -> int:
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
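A note of my own, not the original's: the example trains on the three-bit parity (XOR) truth table. With only 10 iterations from random initial weights, the `predict` call for `[1, 1, 1]` is essentially arbitrary; on the order of thousands of iterations are needed before the network reliably reproduces the table.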
(style_context_codestyle = 49, label = 0)
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
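Because the final `else:` branch swaps the module in `sys.modules` for a `_LazyModule`, the heavy framework-specific submodules are only imported when one of their names is first accessed. A minimal sketch of that behaviour (my own illustration, not part of the file):

import transformers

# No modeling code has been imported yet; this attribute lookup is what
# triggers the deferred import of modeling_electra.
model_cls = transformers.models.electra.ElectraModel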
(code_codestyle = 712)
def abbr(a: str, b: str) -> bool:
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
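Here `dp[i][j]` records whether the first `i` characters of `a` can be turned into the first `j` characters of `b` by upper-casing some lower-case letters and deleting the remaining lower-case ones. A worked example of my own: `abbr("daBcd", "ABC")` is True (upper-case the `a` and `c`, delete both `d`s), while `abbr("dBcd", "ABC")` is False because there is no `a` to supply the `A`.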
(style_context_codestyle = 49, label = 0)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
(code_codestyle = 713)
import inspect
import unittest

from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import YolosForObjectDetection, YolosModel
    from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    # special case for object-detection head model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
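For reference, the integration test above corresponds to roughly the following standalone usage (my own sketch mirroring the test, not part of the file):

from PIL import Image
from transformers import AutoImageProcessor, YolosForObjectDetection

processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
outputs = model(**inputs)

# Keep detections with score > 0.3, rescaled to the original image size.
results = processor.post_process_object_detection(
    outputs, threshold=0.3, target_sizes=[image.size[::-1]]
)[0]
print(results["scores"], results["labels"], results["boxes"])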
(style_context_codestyle = 49, label = 0)
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import datasets
import datasets.config

from .utils import require_beam


class DummyBeamDataset(datasets.BeamBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
(code_codestyle = 714)
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
    execute_subprocess_async,
    require_cpu,
    require_huggingface_suite,
    require_multi_gpu,
    require_single_gpu,
)
from accelerate.utils import patch_environment


@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
(style_context_codestyle = 49, label = 0)
from typing import Any


def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()

    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
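A quick usage sketch with the classic two-state health HMM (my own example values, chosen to match the well-known Wikipedia instance of the Viterbi algorithm):

observations = ["normal", "cold", "dizzy"]
states = ["healthy", "sick"]
initial = {"healthy": 0.6, "sick": 0.4}
transition = {
    "healthy": {"healthy": 0.7, "sick": 0.3},
    "sick": {"healthy": 0.4, "sick": 0.6},
}
emission = {
    "healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "sick": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}
print(viterbi(observations, states, initial, transition, emission))
# expected: ['healthy', 'healthy', 'sick']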
(code_codestyle = 715)
import os
from typing import Optional

import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE


class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
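A minimal usage sketch (my own, with a hypothetical local path): `datasets` registers these filesystem classes with fsspec at import time, after which the single file inside an archive can be read through the matching protocol.

import fsspec
import datasets  # noqa: F401  (importing datasets registers gzip://, bz2://, zstd://, ... with fsspec)

fs = fsspec.filesystem("gzip", fo="./archive.txt.gz")  # hypothetical path to a gzipped text file
with fs.open("archive.txt") as f:  # the stripped name of the single inner file
    data = f.read()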
(style_context_codestyle = 49, label = 0)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
(code_codestyle = 716)
import json
import os
import unittest

from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
(style_context_codestyle = 49, label = 0)
from __future__ import annotations


def rec_insertion_sort(collection: list, n: int):
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
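A quick usage example of my own (the sort happens in place):

nums = [5, 3, 1, 4, 2]
rec_insertion_sort(nums, len(nums))
print(nums)  # [1, 2, 3, 4, 5]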
(code_codestyle = 717)
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
(style_context_codestyle = 49, label = 0)
def _lowerCamelCase ( a_ : str = "The quick brown fox jumps over the lazy dog" , ):
lowerCamelCase :Dict = set()
# Replace all the whitespace in our sentence
lowerCamelCase :int = input_str.replace(''' ''' , '''''')
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower())
return len(a_) == 26
def _lowerCamelCase ( a_ : str = "The quick brown fox jumps over the lazy dog" , ):
lowerCamelCase :Any = [False] * 26
for char in input_str:
if char.islower():
lowerCamelCase :List[Any] = True
elif char.isupper():
lowerCamelCase :List[Any] = True
return all(a_)
def _lowerCamelCase ( a_ : str = "The quick brown fox jumps over the lazy dog" , ):
return len({char for char in input_str.lower() if char.isalpha()}) == 26
def _lowerCamelCase ( ):
from timeit import timeit
lowerCamelCase :Union[str, Any] = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
print(timeit('''is_pangram()''' , setup=a_))
print(timeit('''is_pangram_faster()''' , setup=a_))
print(timeit('''is_pangram_fastest()''' , setup=a_))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
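Quick checks of my own: the default sentence is a pangram, so `is_pangram()` returns True, while `is_pangram("Hello, world!")` returns False because most letters never appear.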
(code_codestyle = 718)
import unittest

from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ],
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
| 49
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
A__ = {
"""configuration_poolformer""": [
"""POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""PoolFormerConfig""",
"""PoolFormerOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""PoolFormerFeatureExtractor"""]
A__ = ["""PoolFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PoolFormerForImageClassification""",
"""PoolFormerModel""",
"""PoolFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 719
|
import operator as op
def _lowerCamelCase ( a_ : Tuple):
lowerCamelCase :int = []
lowerCamelCase :List[str] = lambda a_ , a_: int(x / y) # noqa: E731 integer division operation
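# Note (added for clarity): int(x / y) truncates toward zero, so int(-7 / 2) == -3,
# whereas floor division (-7 // 2) would give -4.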
lowerCamelCase :Optional[int] = {
'''^''': op.pow,
'''*''': op.mul,
'''/''': div,
'''+''': op.add,
'''-''': op.sub,
} # operators & their respective operation
# print table header
print('''Symbol'''.center(8) , '''Action'''.center(12) , '''Stack''' , sep=''' | ''')
print('''-''' * (30 + len(a_)))
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(a_) # append x to stack
# output in tabular format
print(x.rjust(8) , ('''push(''' + x + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
else:
lowerCamelCase :Optional[Any] = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8) , ('''pop(''' + b + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
lowerCamelCase :str = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8) , ('''pop(''' + a + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
stack.append(
str(opr[x](int(a_) , int(a_)))) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8) , ('''push(''' + a + x + b + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''' , )
return int(stack[0])
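# Illustrative trace (not part of the original script): for the postfix input
# "5 6 9 * +", the loop pushes 5, 6 and 9, then "*" pops 9 and 6 and pushes
# 54, and "+" pops 54 and 5 and pushes 59, so the function returns 59.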
if __name__ == "__main__":
A__ = input("""\n\nEnter a Postfix Equation (space separated) = """).split(""" """)
print("""\n\tResult = """, solve(Postfix))
| 49
| 0
|
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = ['torch', 'transformers', 'onnx']
def __init__( self : Union[str, Any] , *__snake_case : Optional[Any] , **__snake_case : str ):
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def snake_case ( cls : Union[str, Any] , *__snake_case : List[str] , **__snake_case : List[str] ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def snake_case ( cls : Tuple , *__snake_case : Dict , **__snake_case : Tuple ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class _lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *__snake_case : Any , **__snake_case : List[Any] ):
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def snake_case ( cls : Any , *__snake_case : str , **__snake_case : int ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def snake_case ( cls : Any , *__snake_case : Any , **__snake_case : Tuple ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class _lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = ['torch', 'transformers', 'onnx']
def __init__( self : int , *__snake_case : Dict , **__snake_case : List[str] ):
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def snake_case ( cls : str , *__snake_case : Tuple , **__snake_case : Any ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def snake_case ( cls : Any , *__snake_case : str , **__snake_case : Any ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class _lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = ['torch', 'transformers', 'onnx']
def __init__( self : Tuple , *__snake_case : Any , **__snake_case : Union[str, Any] ):
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def snake_case ( cls : Any , *__snake_case : Union[str, Any] , **__snake_case : Union[str, Any] ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def snake_case ( cls : int , *__snake_case : Dict , **__snake_case : Union[str, Any] ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class _lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = ['torch', 'transformers', 'onnx']
def __init__( self : Any , *__snake_case : Any , **__snake_case : str ):
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def snake_case ( cls : Any , *__snake_case : Tuple , **__snake_case : Dict ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def snake_case ( cls : Optional[Any] , *__snake_case : Dict , **__snake_case : str ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class _lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = ['torch', 'transformers', 'onnx']
def __init__( self : List[str] , *__snake_case : str , **__snake_case : int ):
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def snake_case ( cls : int , *__snake_case : Tuple , **__snake_case : Optional[Any] ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def snake_case ( cls : Dict , *__snake_case : int , **__snake_case : str ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
| 720
|
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
A__ = """Hello, World!"""
A__ = """en_XX"""
def _lowerCamelCase ( a_ : str , a_ : str , a_ : bool):
lowerCamelCase :int = Path('''data_bin''')
lowerCamelCase :Union[str, Any] = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(a_).parent) , checkpoint_file=Path(a_).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(a_) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(a_).parent / '''sentencepiece.bpe.model''') , src_dict=str(data_dir / '''dict.txt''') , )
xmod.eval() # disable dropout
print(a_)
lowerCamelCase :Any = xmod.model.encoder.sentence_encoder
lowerCamelCase :List[str] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
lowerCamelCase :Dict = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our X-MOD config:''' , a_)
lowerCamelCase :List[Any] = XmodForSequenceClassification(a_) if classification_head else XmodForMaskedLM(a_)
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCamelCase :Union[str, Any] = xmod_sent_encoder.embed_tokens.weight
lowerCamelCase :Tuple = xmod_sent_encoder.embed_positions.weight
lowerCamelCase :List[str] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight) # just zero them out because xmod doesn't use them.
lowerCamelCase :List[Any] = xmod_sent_encoder.layernorm_embedding.weight
lowerCamelCase :Optional[int] = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers):
# Encoder: start of layer
lowerCamelCase :Union[str, Any] = model.roberta.encoder.layer[i]
lowerCamelCase :List[str] = xmod_sent_encoder.layers[i]
# self attention
lowerCamelCase :Optional[int] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size))
):
raise AssertionError('''Dimensions of self-attention weights do not match.''')
lowerCamelCase :Optional[int] = xmod_layer.self_attn.q_proj.weight
lowerCamelCase :List[str] = xmod_layer.self_attn.q_proj.bias
lowerCamelCase :str = xmod_layer.self_attn.k_proj.weight
lowerCamelCase :Optional[Any] = xmod_layer.self_attn.k_proj.bias
lowerCamelCase :Dict = xmod_layer.self_attn.v_proj.weight
lowerCamelCase :Optional[int] = xmod_layer.self_attn.v_proj.bias
# self-attention output
lowerCamelCase :Optional[int] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('''Dimensions of self-attention output weights do not match.''')
lowerCamelCase :List[Any] = xmod_layer.self_attn.out_proj.weight
lowerCamelCase :Union[str, Any] = xmod_layer.self_attn.out_proj.bias
lowerCamelCase :str = xmod_layer.self_attn_layer_norm.weight
lowerCamelCase :List[Any] = xmod_layer.self_attn_layer_norm.bias
# intermediate
lowerCamelCase :Optional[int] = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
raise AssertionError('''Dimensions of intermediate weights do not match.''')
lowerCamelCase :int = xmod_layer.fc1.weight
lowerCamelCase :Union[str, Any] = xmod_layer.fc1.bias
# output
lowerCamelCase :List[str] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
raise AssertionError('''Dimensions of feed-forward weights do not match.''')
lowerCamelCase :str = xmod_layer.fc2.weight
lowerCamelCase :int = xmod_layer.fc2.bias
lowerCamelCase :List[Any] = xmod_layer.final_layer_norm.weight
lowerCamelCase :List[str] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowerCamelCase :List[str] = xmod_layer.adapter_layer_norm.weight
lowerCamelCase :int = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
raise AssertionError('''Lists of language adapters do not match.''')
for lang_code, adapter in xmod_layer.adapter_modules.items():
lowerCamelCase :Optional[int] = bert_output.adapter_modules[lang_code]
lowerCamelCase :Dict = xmod_layer.adapter_modules[lang_code]
lowerCamelCase :List[Any] = from_adapter.fc1.weight
lowerCamelCase :List[Any] = from_adapter.fc1.bias
lowerCamelCase :Dict = from_adapter.fc2.weight
lowerCamelCase :Optional[Any] = from_adapter.fc2.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowerCamelCase :Dict = xmod_sent_encoder.layer_norm.weight
lowerCamelCase :List[Any] = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowerCamelCase :Union[str, Any] = xmod.model.classification_heads['''mnli'''].dense.weight
lowerCamelCase :Tuple = xmod.model.classification_heads['''mnli'''].dense.bias
lowerCamelCase :Optional[Any] = xmod.model.classification_heads['''mnli'''].out_proj.weight
lowerCamelCase :List[Any] = xmod.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
lowerCamelCase :int = xmod.model.encoder.lm_head.dense.weight
lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.dense.bias
lowerCamelCase :Optional[int] = xmod.model.encoder.lm_head.layer_norm.weight
lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.layer_norm.bias
lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.weight
lowerCamelCase :Any = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCamelCase :str = xmod.encode(a_).unsqueeze(0) # batch of size 1
model.roberta.set_default_language(a_)
lowerCamelCase :Any = model(a_)[0]
if classification_head:
lowerCamelCase :Dict = xmod.model.classification_heads['''mnli'''](xmod.extract_features(a_))
else:
lowerCamelCase :int = xmod.model(a_ , lang_id=[SAMPLE_LANGUAGE])[0]
print(our_output.shape , their_output.shape)
lowerCamelCase :List[str] = torch.max(torch.abs(our_output - their_output)).item()
print(F"max_absolute_diff = {max_absolute_diff}") # ~ 1e-7
lowerCamelCase :str = torch.allclose(a_ , a_ , atol=1e-3)
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''')
if not success:
raise Exception('''Something went wRoNg''')
Path(a_).mkdir(parents=a_ , exist_ok=a_)
print(F"Saving model to {pytorch_dump_folder_path}")
model.save_pretrained(a_)
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
A__ = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 49
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
A__ : Any = logging.get_logger(__name__)
def _lowerCamelCase ( a_ : Union[str, Any]):
lowerCamelCase :Union[str, Any] = SwinConfig(
embed_dim=1_92 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['''stage2''', '''stage3''', '''stage4'''] , )
lowerCamelCase :Dict = DetaConfig(
backbone_config=a_ , num_queries=9_00 , encoder_ffn_dim=20_48 , decoder_ffn_dim=20_48 , num_feature_levels=5 , assign_first_stage=a_ , with_box_refine=a_ , two_stage=a_ , )
# set labels
lowerCamelCase :Dict = '''huggingface/label-files'''
if "o365" in model_name:
lowerCamelCase :int = 3_66
lowerCamelCase :str = '''object365-id2label.json'''
else:
lowerCamelCase :List[Any] = 91
lowerCamelCase :int = '''coco-detection-id2label.json'''
lowerCamelCase :List[Any] = num_labels
lowerCamelCase :Union[str, Any] = json.load(open(cached_download(hf_hub_url(a_ , a_ , repo_type='''dataset''')) , '''r'''))
lowerCamelCase :List[Any] = {int(a_): v for k, v in idalabel.items()}
lowerCamelCase :Dict = idalabel
lowerCamelCase :List[str] = {v: k for k, v in idalabel.items()}
return config
def _lowerCamelCase ( a_ : Optional[Any]):
lowerCamelCase :Tuple = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight'''))
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias'''))
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight'''))
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias'''))
# stages
for i in range(len(config.backbone_config.depths)):
for j in range(config.backbone_config.depths[i]):
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"))
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"))
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias"))
if i < 3:
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.reduction.weight", F"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight"))
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.weight", F"model.backbone.model.encoder.layers.{i}.downsample.norm.weight"))
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.bias", F"model.backbone.model.encoder.layers.{i}.downsample.norm.bias"))
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight'''))
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias'''))
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight'''))
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias'''))
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight'''))
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias'''))
# transformer encoder
for i in range(config.encoder_layers):
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", F"model.encoder.layers.{i}.self_attn.sampling_offsets.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", F"model.encoder.layers.{i}.self_attn.sampling_offsets.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", F"model.encoder.layers.{i}.self_attn.attention_weights.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", F"model.encoder.layers.{i}.self_attn.attention_weights.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.weight", F"model.encoder.layers.{i}.self_attn.value_proj.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.bias", F"model.encoder.layers.{i}.self_attn.value_proj.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.weight", F"model.encoder.layers.{i}.self_attn.output_proj.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.bias", F"model.encoder.layers.{i}.self_attn.output_proj.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.weight", F"model.encoder.layers.{i}.self_attn_layer_norm.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"model.encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"model.encoder.layers.{i}.fc1.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"model.encoder.layers.{i}.fc1.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"model.encoder.layers.{i}.fc2.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"model.encoder.layers.{i}.fc2.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"model.encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"model.encoder.layers.{i}.final_layer_norm.bias"))
# transformer decoder
for i in range(config.decoder_layers):
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", F"model.decoder.layers.{i}.encoder_attn.attention_weights.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", F"model.decoder.layers.{i}.encoder_attn.attention_weights.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", F"model.decoder.layers.{i}.encoder_attn.value_proj.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", F"model.decoder.layers.{i}.encoder_attn.value_proj.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", F"model.decoder.layers.{i}.encoder_attn.output_proj.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", F"model.decoder.layers.{i}.encoder_attn.output_proj.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.weight", F"model.decoder.layers.{i}.encoder_attn_layer_norm.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"model.decoder.layers.{i}.encoder_attn_layer_norm.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"model.decoder.layers.{i}.self_attn.out_proj.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"model.decoder.layers.{i}.self_attn.out_proj.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.weight", F"model.decoder.layers.{i}.self_attn_layer_norm.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.bias", F"model.decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"model.decoder.layers.{i}.fc1.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"model.decoder.layers.{i}.fc1.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"model.decoder.layers.{i}.fc2.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"model.decoder.layers.{i}.fc2.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"model.decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"model.decoder.layers.{i}.final_layer_norm.bias"))
# fmt: on
return rename_keys
def _lowerCamelCase ( a_ : Any , a_ : Tuple , a_ : List[Any]):
lowerCamelCase :List[Any] = dct.pop(a_)
lowerCamelCase :Tuple = val
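# Illustrative note: rename_key(state_dict, src, dest) above moves a tensor from
# key src to key dest in place; applying it over every pair returned by
# create_rename_keys(config) remaps the original checkpoint keys onto the
# Hugging Face naming scheme, as done in the conversion function below.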
def _lowerCamelCase ( a_ : Dict , a_ : Optional[Any]):
lowerCamelCase :Tuple = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
for i in range(len(backbone_config.depths)):
lowerCamelCase :Optional[int] = num_features[i]
for j in range(backbone_config.depths[i]):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
lowerCamelCase :List[str] = state_dict.pop(F"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
lowerCamelCase :Dict = state_dict.pop(F"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase :Tuple = in_proj_weight[:dim, :]
lowerCamelCase :List[str] = in_proj_bias[:dim]
lowerCamelCase :Optional[Any] = in_proj_weight[dim : dim * 2, :]
lowerCamelCase :Optional[int] = in_proj_bias[dim : dim * 2]
lowerCamelCase :Dict = in_proj_weight[-dim:, :]
lowerCamelCase :Union[str, Any] = in_proj_bias[-dim:]
# fmt: on
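# Illustrative note: the fused qkv weight popped above has shape (3 * dim, dim);
# rows [0, dim) hold the query projection, rows [dim, 2 * dim) the key projection,
# and rows [2 * dim, 3 * dim) the value projection, which is exactly the split
# performed by the three slices above.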
def _lowerCamelCase ( a_ : int , a_ : Optional[Any]):
# transformer decoder self-attention layers
lowerCamelCase :Union[str, Any] = config.d_model
for i in range(config.decoder_layers):
# read in weights + bias of input projection layer of self-attention
lowerCamelCase :Union[str, Any] = state_dict.pop(F"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
lowerCamelCase :Any = state_dict.pop(F"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase :Dict = in_proj_weight[:hidden_size, :]
lowerCamelCase :Any = in_proj_bias[:hidden_size]
lowerCamelCase :Any = in_proj_weight[hidden_size : hidden_size * 2, :]
lowerCamelCase :Any = in_proj_bias[hidden_size : hidden_size * 2]
lowerCamelCase :Optional[int] = in_proj_weight[-hidden_size:, :]
lowerCamelCase :Union[str, Any] = in_proj_bias[-hidden_size:]
def _lowerCamelCase ( ):
lowerCamelCase :Any = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase :List[Any] = Image.open(requests.get(a_ , stream=a_).raw)
return im
@torch.no_grad()
def _lowerCamelCase ( a_ : Tuple , a_ : Tuple , a_ : Dict):
lowerCamelCase :Tuple = get_deta_config(a_)
# load original state dict
if model_name == "deta-swin-large":
lowerCamelCase :Optional[Any] = hf_hub_download(repo_id='''nielsr/deta-checkpoints''' , filename='''adet_swin_ft.pth''')
elif model_name == "deta-swin-large-o365":
lowerCamelCase :Optional[Any] = hf_hub_download(repo_id='''jozhang97/deta-swin-l-o365''' , filename='''deta_swin_pt_o365.pth''')
else:
raise ValueError(F"Model name {model_name} not supported")
lowerCamelCase :int = torch.load(a_ , map_location='''cpu''')['''model''']
# inspect the keys and shapes of the original state dict
for name, param in state_dict.items():
print(a_ , param.shape)
# rename keys
lowerCamelCase :Union[str, Any] = create_rename_keys(a_)
for src, dest in rename_keys:
rename_key(a_ , a_ , a_)
read_in_swin_q_k_v(a_ , config.backbone_config)
read_in_decoder_q_k_v(a_ , a_)
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
lowerCamelCase :List[str] = state_dict.pop(a_)
lowerCamelCase :List[str] = val
if "input_proj" in key:
lowerCamelCase :Optional[Any] = state_dict.pop(a_)
lowerCamelCase :str = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
lowerCamelCase :Dict = state_dict.pop(a_)
lowerCamelCase :str = val
# finally, create HuggingFace model and load state dict
lowerCamelCase :Tuple = DetaForObjectDetection(a_)
model.load_state_dict(a_)
model.eval()
lowerCamelCase :Any = '''cuda''' if torch.cuda.is_available() else '''cpu'''
model.to(a_)
# load image processor
lowerCamelCase :Tuple = DetaImageProcessor(format='''coco_detection''')
# verify our conversion on image
lowerCamelCase :int = prepare_img()
lowerCamelCase :List[str] = processor(images=a_ , return_tensors='''pt''')
lowerCamelCase :int = encoding['''pixel_values''']
lowerCamelCase :List[str] = model(pixel_values.to(a_))
# verify logits
print('''Logits:''' , outputs.logits[0, :3, :3])
print('''Boxes:''' , outputs.pred_boxes[0, :3, :3])
if model_name == "deta-swin-large":
lowerCamelCase :str = torch.tensor(
[[-7.6_308, -2.8_485, -5.3_737], [-7.2_037, -4.5_505, -4.8_027], [-7.2_943, -4.2_611, -4.6_617]])
lowerCamelCase :Optional[int] = torch.tensor([[0.4_987, 0.4_969, 0.9_999], [0.2_549, 0.5_498, 0.4_805], [0.5_498, 0.2_757, 0.0_569]])
elif model_name == "deta-swin-large-o365":
lowerCamelCase :Optional[int] = torch.tensor(
[[-8.0_122, -3.5_720, -4.9_717], [-8.1_547, -3.6_886, -4.6_389], [-7.6_610, -3.6_194, -5.0_134]])
lowerCamelCase :List[Any] = torch.tensor([[0.2_523, 0.5_549, 0.4_881], [0.7_715, 0.4_149, 0.4_601], [0.5_503, 0.2_753, 0.0_575]])
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(a_) , atol=1e-4)
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(a_) , atol=1e-4)
print('''Everything ok!''')
if pytorch_dump_folder_path:
# Save model and processor
logger.info(F"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
Path(a_).mkdir(exist_ok=a_)
model.save_pretrained(a_)
processor.save_pretrained(a_)
# Push to hub
if push_to_hub:
print('''Pushing model and processor to hub...''')
model.push_to_hub(F"jozhang97/{model_name}")
processor.push_to_hub(F"jozhang97/{model_name}")
if __name__ == "__main__":
A__ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
type=str,
default="""deta-swin-large""",
choices=["""deta-swin-large""", """deta-swin-large-o365"""],
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
help="""Path to the folder to output PyTorch model.""",
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
A__ : int = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 721
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'roberta-prelayernorm'
def __init__( self : str , __snake_case : List[str]=50265 , __snake_case : Union[str, Any]=768 , __snake_case : Tuple=12 , __snake_case : int=12 , __snake_case : Any=3072 , __snake_case : Optional[int]="gelu" , __snake_case : List[Any]=0.1 , __snake_case : int=0.1 , __snake_case : Union[str, Any]=512 , __snake_case : Dict=2 , __snake_case : int=0.0_2 , __snake_case : Any=1e-1_2 , __snake_case : Optional[int]=1 , __snake_case : Dict=0 , __snake_case : Optional[int]=2 , __snake_case : Any="absolute" , __snake_case : Union[str, Any]=True , __snake_case : List[str]=None , **__snake_case : Optional[int] , ):
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
lowerCamelCase :Optional[int] = vocab_size
lowerCamelCase :Dict = hidden_size
lowerCamelCase :Tuple = num_hidden_layers
lowerCamelCase :Optional[int] = num_attention_heads
lowerCamelCase :Any = hidden_act
lowerCamelCase :List[Any] = intermediate_size
lowerCamelCase :Union[str, Any] = hidden_dropout_prob
lowerCamelCase :str = attention_probs_dropout_prob
lowerCamelCase :Tuple = max_position_embeddings
lowerCamelCase :int = type_vocab_size
lowerCamelCase :Optional[Any] = initializer_range
lowerCamelCase :Union[str, Any] = layer_norm_eps
lowerCamelCase :Dict = position_embedding_type
lowerCamelCase :List[Any] = use_cache
lowerCamelCase :Optional[int] = classifier_dropout
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
@property
def snake_case ( self : Any ):
if self.task == "multiple-choice":
lowerCamelCase :Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCamelCase :List[str] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
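# Illustrative note: for the "multiple-choice" task the inputs property above
# exposes an extra dynamic "choice" axis between batch and sequence, so an
# exported ONNX graph can accept a variable number of answer candidates per example.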
| 49
| 0
|
def _lowerCamelCase ( a_ : float , a_ : float):
if density <= 0:
raise ValueError('''Impossible fluid density''')
if bulk_modulus <= 0:
raise ValueError('''Impossible bulk modulus''')
return (bulk_modulus / density) ** 0.5
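# Illustrative sketch (approximate values, not from the original file): for water
# with density ~998 kg/m^3 and bulk modulus ~2.15e9 Pa, this returns
# (2.15e9 / 998) ** 0.5, roughly 1468 m/s, close to the measured speed of sound in water.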
if __name__ == "__main__":
import doctest
doctest.testmod()
| 700
|
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = DebertaTokenizer
_UpperCAmelCase = True
_UpperCAmelCase = DebertaTokenizerFast
def snake_case ( self : List[str] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase :Dict = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
lowerCamelCase :List[str] = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
lowerCamelCase :Any = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCamelCase :Dict = {'''unk_token''': '''[UNK]'''}
lowerCamelCase :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__snake_case ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__snake_case ) )
def snake_case ( self : str , **__snake_case : Dict ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Optional[Any] , __snake_case : int ):
lowerCamelCase :List[Any] = '''lower newer'''
lowerCamelCase :List[str] = '''lower newer'''
return input_text, output_text
def snake_case ( self : str ):
lowerCamelCase :Optional[int] = self.get_tokenizer()
lowerCamelCase :Union[str, Any] = '''lower newer'''
lowerCamelCase :str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
lowerCamelCase :Optional[int] = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
lowerCamelCase :List[str] = tokens + [tokenizer.unk_token]
lowerCamelCase :Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def snake_case ( self : Optional[int] ):
lowerCamelCase :List[str] = self.get_tokenizer()
lowerCamelCase :Optional[int] = tokenizer('''Hello''' , '''World''' )
lowerCamelCase :List[str] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['''token_type_ids'''] , __snake_case )
@slow
def snake_case ( self : str ):
lowerCamelCase :Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
lowerCamelCase :Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__snake_case )
lowerCamelCase :Tuple = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__snake_case )
lowerCamelCase :Union[str, Any] = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
lowerCamelCase :str = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
lowerCamelCase :Any = tokenizer.build_inputs_with_special_tokens(__snake_case )
lowerCamelCase :Dict = tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def snake_case ( self : str ):
lowerCamelCase :List[str] = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
lowerCamelCase :int = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
lowerCamelCase :Tuple = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
lowerCamelCase :List[Any] = tokenizer(__snake_case , padding=__snake_case )
lowerCamelCase :Union[str, Any] = [tokenizer.decode(__snake_case , skip_special_tokens=__snake_case ) for seq in encoding['''input_ids''']]
# fmt: off
lowerCamelCase :Any = {
'''input_ids''': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
lowerCamelCase :Optional[int] = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , __snake_case )
for expected, decoded in zip(__snake_case , __snake_case ):
self.assertEqual(__snake_case , __snake_case )
| 49
| 0
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A__ = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = XLMRobertaTokenizer
_UpperCAmelCase = XLMRobertaTokenizerFast
_UpperCAmelCase = True
_UpperCAmelCase = True
def snake_case ( self : int ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase :List[Any] = XLMRobertaTokenizer(__snake_case , keep_accents=__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case ( self : Dict ):
lowerCamelCase :List[str] = '''<pad>'''
lowerCamelCase :Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) , __snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) , __snake_case )
def snake_case ( self : int ):
lowerCamelCase :int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(__snake_case ) , 1002 )
def snake_case ( self : str ):
self.assertEqual(self.get_tokenizer().vocab_size , 1002 )
def snake_case ( self : Tuple ):
lowerCamelCase :str = XLMRobertaTokenizer(__snake_case , keep_accents=__snake_case )
lowerCamelCase :int = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCamelCase :str = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowerCamelCase :Tuple = tokenizer.convert_tokens_to_ids(__snake_case )
self.assertListEqual(
__snake_case , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCamelCase :Any = tokenizer.convert_ids_to_tokens(__snake_case )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def snake_case ( self : str ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCamelCase :int = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase :List[str] = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :int = self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :Optional[int] = tempfile.mkdtemp()
lowerCamelCase :List[Any] = tokenizer_r.save_pretrained(__snake_case )
lowerCamelCase :List[str] = tokenizer_p.save_pretrained(__snake_case )
# Checks it saves with the same files plus the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
lowerCamelCase :Any = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(__snake_case , __snake_case )
# Checks everything loads correctly in the same way
lowerCamelCase :Dict = tokenizer_r.from_pretrained(__snake_case )
lowerCamelCase :Optional[Any] = tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=True
lowerCamelCase :List[str] = tempfile.mkdtemp()
lowerCamelCase :Optional[Any] = tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
lowerCamelCase :Any = tokenizer_p.save_pretrained(__snake_case )
# Checks it saves with the same files
self.assertSequenceEqual(__snake_case , __snake_case )
# Checks everything loads correctly in the same way
lowerCamelCase :Any = tokenizer_r.from_pretrained(__snake_case )
lowerCamelCase :Optional[int] = tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=False
lowerCamelCase :Dict = tempfile.mkdtemp()
lowerCamelCase :Optional[int] = tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
lowerCamelCase :Optional[int] = tokenizer_p.save_pretrained(__snake_case )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCamelCase :Tuple = tokenizer_r.from_pretrained(__snake_case )
lowerCamelCase :str = tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
@cached_property
def snake_case ( self : Optional[Any] ):
return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' )
def snake_case ( self : Optional[int] ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(__snake_case , f.name )
lowerCamelCase :Optional[int] = XLMRobertaTokenizer(f.name , keep_accents=__snake_case )
lowerCamelCase :Any = pickle.dumps(__snake_case )
pickle.loads(__snake_case )
def snake_case ( self : Optional[Any] ):
if not self.test_rust_tokenizer:
return
lowerCamelCase :Optional[Any] = self.get_tokenizer()
lowerCamelCase :Tuple = self.get_rust_tokenizer()
lowerCamelCase :List[str] = '''I was born in 92000, and this is falsé.'''
lowerCamelCase :Any = tokenizer.tokenize(__snake_case )
lowerCamelCase :Any = rust_tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
lowerCamelCase :List[str] = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
lowerCamelCase :Dict = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
lowerCamelCase :Optional[Any] = self.get_rust_tokenizer()
lowerCamelCase :List[str] = tokenizer.encode(__snake_case )
lowerCamelCase :Dict = rust_tokenizer.encode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
@slow
def snake_case ( self : List[Any] ):
lowerCamelCase :Tuple = '''Hello World!'''
lowerCamelCase :Dict = [0, 35378, 6661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__snake_case , self.big_tokenizer.encode(__snake_case ) )
@slow
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Any = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
lowerCamelCase :Optional[int] = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
179459,
124850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
10114,
711,
152,
20,
6,
5,
22376,
642,
1221,
15190,
34153,
450,
5608,
959,
1119,
57702,
136,
186,
47,
1098,
29367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
50901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(__snake_case , self.big_tokenizer.encode(__snake_case ) )
@slow
def snake_case ( self : str ):
# fmt: off
lowerCamelCase :Any = {'''input_ids''': [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
| 701
|
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
A__ = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Any , __snake_case : str , __snake_case : bool , __snake_case : str = None , __snake_case : list = None ):
lowerCamelCase :Tuple = None
lowerCamelCase :Tuple = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
lowerCamelCase :Optional[int] = os.path.abspath('''examples''' )
for item in os.listdir(__snake_case ):
if item not in EXCLUDE_EXAMPLES:
lowerCamelCase :Optional[int] = os.path.join(__snake_case , __snake_case )
if os.path.isfile(__snake_case ) and ".py" in item_path:
with self.subTest(
tested_script=__snake_case , feature_script=__snake_case , tested_section='''main()''' if parser_only else '''training_function()''' , ):
lowerCamelCase :Union[str, Any] = compare_against_test(
os.path.join(__snake_case , __snake_case ) , __snake_case , __snake_case , __snake_case )
lowerCamelCase :int = '''\n'''.join(__snake_case )
if special_strings is not None:
for string in special_strings:
lowerCamelCase :int = diff.replace(__snake_case , '''''' )
self.assertEqual(__snake_case , '''''' )
def snake_case ( self : Dict ):
self.one_complete_example('''complete_nlp_example.py''' , __snake_case )
self.one_complete_example('''complete_nlp_example.py''' , __snake_case )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Union[str, Any] = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
lowerCamelCase :Optional[int] = [
''' ''' * 16 + '''{\n\n''',
''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 20 + '''"epoch": epoch,\n\n''',
''' ''' * 16 + '''},\n\n''',
''' ''' * 16 + '''step=epoch,\n''',
''' ''' * 12,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , __snake_case , __snake_case , __snake_case )
self.one_complete_example('''complete_cv_example.py''' , __snake_case , __snake_case , __snake_case )
@mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} )
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = False
@classmethod
def snake_case ( cls : Optional[Any] ):
super().setUpClass()
lowerCamelCase :Any = tempfile.mkdtemp()
lowerCamelCase :Optional[int] = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
lowerCamelCase :List[str] = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def snake_case ( cls : Dict ):
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def snake_case ( self : int ):
lowerCamelCase :Any = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def snake_case ( self : List[Any] ):
lowerCamelCase :Tuple = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n ".split()
lowerCamelCase :List[Any] = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def snake_case ( self : List[str] ):
lowerCamelCase :Dict = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}\n ".split()
lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case )
self.assertNotIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
def snake_case ( self : str ):
lowerCamelCase :List[Any] = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}\n ".split()
lowerCamelCase :Optional[int] = run_command(self._launch_args + testargs , return_stdout=__snake_case )
if torch.cuda.is_available():
lowerCamelCase :Union[str, Any] = torch.cuda.device_count()
else:
lowerCamelCase :Dict = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
else:
self.assertIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
@slow
def snake_case ( self : Any ):
lowerCamelCase :Tuple = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case )
lowerCamelCase :Tuple = re.findall('''({.+})''' , __snake_case )
lowerCamelCase :Optional[Any] = [r for r in results if '''accuracy''' in r][-1]
lowerCamelCase :List[str] = ast.literal_eval(__snake_case )
self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 )
def snake_case ( self : int ):
lowerCamelCase :Dict = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case ( self : Any ):
with tempfile.TemporaryDirectory() as tmpdir:
lowerCamelCase :Tuple = F"\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__snake_case , '''tracking''' ) ) )
def snake_case ( self : Tuple ):
lowerCamelCase :Tuple = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :int = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
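# Illustrative shape of the commands the tests above assemble (paths assumed):
# the class-level launch args are simply concatenated with per-test script args.
_demo_launch_args = ["accelerate", "launch", "--config_file", "default_config.yml"]
_demo_testargs = "examples/by_feature/checkpointing.py --checkpointing_steps epoch".split()
assert (_demo_launch_args + _demo_testargs)[:2] == ["accelerate", "launch"]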
| 49
| 0
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Any ):
lowerCamelCase :List[str] = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
lowerCamelCase :Any = get_activation('''gelu''' )
self.assertTrue(torch.allclose(gelu_python(__snake_case ) , torch_builtin(__snake_case ) ) )
self.assertFalse(torch.allclose(gelu_python(__snake_case ) , gelu_new(__snake_case ) ) )
def snake_case ( self : List[Any] ):
lowerCamelCase :int = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
lowerCamelCase :List[Any] = get_activation('''gelu''' )
lowerCamelCase :Any = get_activation('''gelu_10''' )
lowerCamelCase :int = torch_builtin(__snake_case )
lowerCamelCase :Any = geluaa(__snake_case )
lowerCamelCase :Dict = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(__snake_case ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def snake_case ( self : Optional[int] ):
get_activation('''gelu''' )
get_activation('''gelu_10''' )
get_activation('''gelu_fast''' )
get_activation('''gelu_new''' )
get_activation('''gelu_python''' )
get_activation('''gelu_pytorch_tanh''' )
get_activation('''linear''' )
get_activation('''mish''' )
get_activation('''quick_gelu''' )
get_activation('''relu''' )
get_activation('''sigmoid''' )
get_activation('''silu''' )
get_activation('''swish''' )
get_activation('''tanh''' )
with self.assertRaises(__snake_case ):
get_activation('''bogus''' )
with self.assertRaises(__snake_case ):
get_activation(__snake_case )
def snake_case ( self : List[Any] ):
lowerCamelCase :List[Any] = get_activation('''gelu''' )
lowerCamelCase :Optional[Any] = 1
lowerCamelCase :List[str] = get_activation('''gelu''' )
self.assertEqual(acta.a , 1 )
with self.assertRaises(__snake_case ):
lowerCamelCase :Dict = acta.a
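# Rough standalone check of the clipping behaviour exercised above: per the test,
# "gelu_10" acts like GELU with activations clipped at 10. Guarded because it
# needs transformers with torch available at runtime:
if False:  # flip to run manually
    _act = get_activation("gelu_10")
    assert _act(torch.tensor([1000.0])).item() == 10.0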
| 702
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
A__ = imread(R"""digital_image_processing/image_data/lena_small.jpg""")
A__ = cvtColor(img, COLOR_BGR2GRAY)
def _lowerCamelCase ( ):
lowerCamelCase :int = cn.convert_to_negative(a_)
 # the negative image should contain at least one truthy (nonzero) pixel
assert negative_img.any()
def _lowerCamelCase ( ):
with Image.open('''digital_image_processing/image_data/lena_small.jpg''') as img:
 # PIL Images have no useful __eq__, so compare against the repr prefix instead
assert str(cc.change_contrast(a_ , 1_10)).startswith(
'''<PIL.Image.Image image mode=RGB size=100x100 at''')
def _lowerCamelCase ( ):
lowerCamelCase :Optional[Any] = canny.gen_gaussian_kernel(9 , sigma=1.4)
 # every entry of a Gaussian kernel is positive, so .all() must hold
assert resp.all()
def _lowerCamelCase ( ):
lowerCamelCase :str = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0)
 # every pixel of the loaded grayscale image should be nonzero
assert canny_img.all()
lowerCamelCase :Optional[Any] = canny.canny(a_)
# assert canny array for at least one True
assert canny_array.any()
def _lowerCamelCase ( ):
assert gg.gaussian_filter(a_ , 5 , sigma=0.9).all()
def _lowerCamelCase ( ):
 # Laplacian kernel including the diagonal neighbours
lowerCamelCase :List[Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
lowerCamelCase :List[Any] = conv.img_convolve(a_ , a_).astype(a_)
assert res.any()
def _lowerCamelCase ( ):
assert med.median_filter(a_ , 3).any()
def _lowerCamelCase ( ):
lowerCamelCase , lowerCamelCase :Union[str, Any] = sob.sobel_filter(a_)
assert grad.any() and theta.any()
def _lowerCamelCase ( ):
lowerCamelCase :Dict = sp.make_sepia(a_ , 20)
assert sepia.all()
def _lowerCamelCase ( a_ : str = "digital_image_processing/image_data/lena_small.jpg"):
lowerCamelCase :Any = bs.Burkes(imread(a_ , 1) , 1_20)
burkes.process()
assert burkes.output_img.any()
def _lowerCamelCase ( a_ : str = "digital_image_processing/image_data/lena_small.jpg" , ):
lowerCamelCase :Tuple = rs.NearestNeighbour(imread(a_ , 1) , 4_00 , 2_00)
nn.process()
assert nn.output.any()
def _lowerCamelCase ( ):
lowerCamelCase :Tuple = '''digital_image_processing/image_data/lena.jpg'''
# Reading the image and converting it to grayscale.
lowerCamelCase :Tuple = imread(a_ , 0)
 # get_neighbors_pixel() should return a non-None neighbourhood
lowerCamelCase :Dict = 0
lowerCamelCase :Optional[Any] = 0
lowerCamelCase :str = image[x_coordinate][y_coordinate]
lowerCamelCase :Any = lbp.get_neighbors_pixel(
a_ , a_ , a_ , a_)
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
 # Create a numpy array with the same height and width as the read image
lowerCamelCase :int = np.zeros((image.shape[0], image.shape[1]))
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0]):
for j in range(0 , image.shape[1]):
lowerCamelCase :Optional[int] = lbp.local_binary_value(a_ , a_ , a_)
assert lbp_image.any()
| 49
| 0
|
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
A__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self : Tuple , __snake_case : Dict , __snake_case : Tuple ):
super().__init__()
self.register_modules(unet=__snake_case , scheduler=__snake_case )
@torch.no_grad()
def __call__( self : Any , __snake_case : int = 1 , __snake_case : int = 100 , __snake_case : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __snake_case : Optional[float] = None , __snake_case : bool = True , ):
if audio_length_in_s is None:
lowerCamelCase :Union[str, Any] = self.unet.config.sample_size / self.unet.config.sample_rate
lowerCamelCase :List[str] = audio_length_in_s * self.unet.config.sample_rate
lowerCamelCase :int = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
F"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
F" {3 * down_scale_factor / self.unet.config.sample_rate}." )
lowerCamelCase :Union[str, Any] = int(__snake_case )
if sample_size % down_scale_factor != 0:
lowerCamelCase :Dict = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
F"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
F" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
''' process.''' )
lowerCamelCase :str = int(__snake_case )
lowerCamelCase :List[Any] = next(iter(self.unet.parameters() ) ).dtype
lowerCamelCase :Tuple = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(__snake_case , __snake_case ) and len(__snake_case ) != batch_size:
raise ValueError(
F"You have passed a list of generators of length {len(__snake_case )}, but requested an effective batch"
F" size of {batch_size}. Make sure the batch size matches the length of the generators." )
lowerCamelCase :str = randn_tensor(__snake_case , generator=__snake_case , device=self.device , dtype=__snake_case )
# set step values
self.scheduler.set_timesteps(__snake_case , device=audio.device )
lowerCamelCase :Optional[Any] = self.scheduler.timesteps.to(__snake_case )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowerCamelCase :List[Any] = self.unet(__snake_case , __snake_case ).sample
 # 2. compute the previous sample: x_t -> x_t-1
lowerCamelCase :Any = self.scheduler.step(__snake_case , __snake_case , __snake_case ).prev_sample
lowerCamelCase :List[str] = audio.clamp(-1 , 1 ).float().cpu().numpy()
lowerCamelCase :Optional[Any] = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=__snake_case )
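# Illustrative check of the padding logic above (numbers assumed): a requested
# sample length is rounded up to the next multiple of the UNet's downscale factor.
_down_scale = 2 ** 3  # e.g. three up-blocks
_requested = 8001
_padded = (_requested // _down_scale + 1) * _down_scale if _requested % _down_scale else _requested
assert _padded == 8008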
| 703
|
import os
from math import logaa
def _lowerCamelCase ( a_ : str = "base_exp.txt"):
lowerCamelCase :float = 0
lowerCamelCase :Optional[int] = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(a_) , a_))):
lowerCamelCase , lowerCamelCase :Optional[int] = list(map(a_ , line.split(''',''')))
if x * logaa(a_) > largest:
lowerCamelCase :List[Any] = x * logaa(a_)
lowerCamelCase :Any = i + 1
return result
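# Why the logarithm comparison above works (illustrative check): log10 is
# strictly increasing, so a**b > c**d exactly when b*log10(a) > d*log10(c).
from math import log10 as _log10
assert (2 ** 11 > 3 ** 7) == (11 * _log10(2) > 7 * _log10(3))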
if __name__ == "__main__":
print(solution())
| 49
| 0
|
'''simple docstring'''
# Imports
import numpy as np
class _lowerCAmelCase :
def __init__( self : List[Any] , __snake_case : List[Any]=None , __snake_case : Optional[int]=None , __snake_case : Optional[Any]=None , __snake_case : int=None , __snake_case : Union[str, Any]=None ):
self.set_matricies(red=__snake_case , green=__snake_case , blue=__snake_case , red_edge=__snake_case , nir=__snake_case )
def snake_case ( self : Optional[Any] , __snake_case : Union[str, Any]=None , __snake_case : str=None , __snake_case : Optional[int]=None , __snake_case : Any=None , __snake_case : Optional[Any]=None ):
if red is not None:
lowerCamelCase :Union[str, Any] = red
if green is not None:
lowerCamelCase :Tuple = green
if blue is not None:
lowerCamelCase :Optional[Any] = blue
if red_edge is not None:
lowerCamelCase :Any = red_edge
if nir is not None:
lowerCamelCase :List[Any] = nir
return True
def snake_case ( self : Dict , __snake_case : Dict="" , __snake_case : Optional[Any]=None , __snake_case : Optional[int]=None , __snake_case : List[str]=None , __snake_case : Any=None , __snake_case : Any=None ):
self.set_matricies(red=__snake_case , green=__snake_case , blue=__snake_case , red_edge=__snake_case , nir=__snake_case )
lowerCamelCase :List[Any] = {
'''ARVI2''': self.arvaa,
'''CCCI''': self.ccci,
'''CVI''': self.cvi,
'''GLI''': self.gli,
'''NDVI''': self.ndvi,
'''BNDVI''': self.bndvi,
'''redEdgeNDVI''': self.red_edge_ndvi,
'''GNDVI''': self.gndvi,
'''GBNDVI''': self.gbndvi,
'''GRNDVI''': self.grndvi,
'''RBNDVI''': self.rbndvi,
'''PNDVI''': self.pndvi,
'''ATSAVI''': self.atsavi,
'''BWDRVI''': self.bwdrvi,
'''CIgreen''': self.ci_green,
'''CIrededge''': self.ci_rededge,
'''CI''': self.ci,
'''CTVI''': self.ctvi,
'''GDVI''': self.gdvi,
'''EVI''': self.evi,
'''GEMI''': self.gemi,
'''GOSAVI''': self.gosavi,
'''GSAVI''': self.gsavi,
'''Hue''': self.hue,
'''IVI''': self.ivi,
'''IPVI''': self.ipvi,
'''I''': self.i,
'''RVI''': self.rvi,
'''MRVI''': self.mrvi,
'''MSAVI''': self.m_savi,
'''NormG''': self.norm_g,
'''NormNIR''': self.norm_nir,
'''NormR''': self.norm_r,
'''NGRDI''': self.ngrdi,
'''RI''': self.ri,
'''S''': self.s,
'''IF''': self._if,
'''DVI''': self.dvi,
'''TVI''': self.tvi,
'''NDRE''': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('''Index not in the list!''' )
return False
def snake_case ( self : List[Any] ):
return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red)))
def snake_case ( self : List[str] ):
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def snake_case ( self : Optional[int] ):
return self.nir * (self.red / (self.green**2))
def snake_case ( self : List[Any] ):
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def snake_case ( self : Optional[Any] ):
return (self.nir - self.red) / (self.nir + self.red)
def snake_case ( self : str ):
return (self.nir - self.blue) / (self.nir + self.blue)
def snake_case ( self : List[Any] ):
return (self.redEdge - self.red) / (self.redEdge + self.red)
def snake_case ( self : List[str] ):
return (self.nir - self.green) / (self.nir + self.green)
def snake_case ( self : str ):
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def snake_case ( self : Optional[int] ):
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def snake_case ( self : List[str] ):
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def snake_case ( self : List[Any] ):
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def snake_case ( self : Optional[Any] , __snake_case : str=0.0_8 , __snake_case : Optional[int]=1.2_2 , __snake_case : Dict=0.0_3 ):
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def snake_case ( self : List[str] ):
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def snake_case ( self : Dict ):
return (self.nir / self.green) - 1
def snake_case ( self : str ):
return (self.nir / self.redEdge) - 1
def snake_case ( self : Any ):
return (self.red - self.blue) / self.red
def snake_case ( self : Any ):
lowerCamelCase :int = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def snake_case ( self : Any ):
return self.nir - self.green
def snake_case ( self : Optional[Any] ):
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Dict = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red)
def snake_case ( self : Optional[int] , __snake_case : str=0.1_6 ):
return (self.nir - self.green) / (self.nir + self.green + y)
def snake_case ( self : int , __snake_case : Optional[Any]=0.5 ):
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def snake_case ( self : Tuple ):
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def snake_case ( self : Tuple , __snake_case : Dict=None , __snake_case : Dict=None ):
return (self.nir - b) / (a * self.red)
def snake_case ( self : Dict ):
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def snake_case ( self : List[Any] ):
return (self.red + self.green + self.blue) / 30.5
def snake_case ( self : int ):
return self.nir / self.red
def snake_case ( self : List[Any] ):
return (self.rvi() - 1) / (self.rvi() + 1)
def snake_case ( self : Any ):
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def snake_case ( self : Dict ):
return self.green / (self.nir + self.red + self.green)
def snake_case ( self : Optional[int] ):
return self.nir / (self.nir + self.red + self.green)
def snake_case ( self : int ):
return self.red / (self.nir + self.red + self.green)
def snake_case ( self : Any ):
return (self.green - self.red) / (self.green + self.red)
def snake_case ( self : str ):
return (self.red - self.green) / (self.red + self.green)
def snake_case ( self : int ):
lowerCamelCase :Optional[Any] = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
lowerCamelCase :Dict = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def snake_case ( self : Optional[Any] ):
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def snake_case ( self : int ):
return self.nir / self.red
def snake_case ( self : Any ):
return (self.ndvi() + 0.5) ** (1 / 2)
def snake_case ( self : int ):
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
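# Illustrative NDVI computation with toy arrays of the kind the class above
# expects (values assumed; healthy vegetation reflects strongly in the NIR band):
_nir = np.array([0.5, 0.6])
_red = np.array([0.1, 0.2])
assert np.allclose((_nir - _red) / (_nir + _red), [0.4 / 0.6, 0.5])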
| 704
|
def _lowerCamelCase ( a_ : list):
if not grid or not grid[0]:
raise TypeError('''The grid does not contain the appropriate information''')
for cell_n in range(1 , len(grid[0])):
grid[0][cell_n] += grid[0][cell_n - 1]
lowerCamelCase :Any = grid[0]
for row_n in range(1 , len(a_)):
lowerCamelCase :List[str] = grid[row_n]
lowerCamelCase :Union[str, Any] = fill_row(a_ , a_)
lowerCamelCase :List[Any] = grid[row_n]
return grid[-1][-1]
def _lowerCamelCase ( a_ : list , a_ : list):
current_row[0] += row_above[0]
for cell_n in range(1 , len(a_)):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n])
return current_row
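# Self-contained restatement of the row-by-row DP above (grid values assumed):
# each cell accumulates the cheaper of arriving from the left or from above.
def _demo_min_path_sum(grid: list) -> int:
    row = list(grid[0])
    for j in range(1, len(row)):
        row[j] += row[j - 1]
    for raw in grid[1:]:
        new = list(raw)
        new[0] += row[0]
        for j in range(1, len(new)):
            new[j] += min(new[j - 1], row[j])
        row = new
    return row[-1]

assert _demo_min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7  # path 1-3-1-1-1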
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
| 0
|
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : str ):
lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :Optional[Any] = -1
lowerCamelCase :List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :str = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase :str = TextStreamer(__snake_case )
model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase :Optional[int] = cs.out[:-1]
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : Dict ):
lowerCamelCase :Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :int = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :List[Any] = -1
lowerCamelCase :Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :List[Any] = tokenizer.decode(greedy_ids[0] )
lowerCamelCase :List[str] = TextIteratorStreamer(__snake_case )
lowerCamelCase :List[str] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case )
thread.start()
lowerCamelCase :Any = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : str ):
lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :List[str] = -1
lowerCamelCase :Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Optional[Any] = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :List[str] = greedy_ids[:, input_ids.shape[1] :]
lowerCamelCase :Union[str, Any] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase :List[str] = TextStreamer(__snake_case , skip_prompt=__snake_case )
model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase :int = cs.out[:-1]
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : Optional[int] ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowerCamelCase :List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' )
lowerCamelCase :Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__snake_case )
lowerCamelCase :Optional[int] = -1
lowerCamelCase :Union[str, Any] = torch.ones((1, 5) , device=__snake_case ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowerCamelCase :Dict = TextStreamer(__snake_case , skip_special_tokens=__snake_case )
model.generate(__snake_case , max_new_tokens=1 , do_sample=__snake_case , streamer=__snake_case )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowerCamelCase :Tuple = cs.out[:-1] # Remove the final "\n"
lowerCamelCase :int = tokenizer(__snake_case , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def snake_case ( self : List[Any] ):
lowerCamelCase :List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :Optional[int] = -1
lowerCamelCase :Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :List[Any] = TextIteratorStreamer(__snake_case , timeout=0.0_0_1 )
lowerCamelCase :Dict = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__snake_case ):
lowerCamelCase :Dict = ''''''
for new_text in streamer:
streamer_text += new_text
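# Consumer-side shape of the TextIteratorStreamer pattern tested above
# (model/tokenizer/input_ids are assumed to be already loaded; sketch only):
if False:  # flip to run with a real checkpoint
    _streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
    Thread(target=model.generate, kwargs={"input_ids": input_ids, "streamer": _streamer}).start()
    _generated_text = "".join(_streamer)  # main thread drains the stream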
| 705
|
import math
def _lowerCamelCase ( a_ : int):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
 # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(a_) + 1) , 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
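# Self-contained restatement of the 6k +/- 1 trial division above, as a quick
# illustrative check (function name assumed):
def _demo_is_prime(n: int) -> bool:
    if n < 2:
        return False
    if n < 4:
        return True  # 2 and 3
    if n % 2 == 0 or n % 3 == 0:
        return False
    i = 5
    while i * i <= n:
        if n % i == 0 or n % (i + 2) == 0:
            return False
        i += 6
    return True

assert [p for p in range(2, 30) if _demo_is_prime(p)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]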
def _lowerCamelCase ( a_ : float = 0.1):
lowerCamelCase :Dict = 3
lowerCamelCase :List[Any] = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1):
primes += is_prime(a_)
j += 2
return j
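# Illustrative: range(j*j + j + 1, (j+2)*(j+2), j + 1) walks three corners of
# the next spiral layer; the fourth corner, (j+2)**2, is a square and never prime.
_j = 3
assert list(range(_j * _j + _j + 1, (_j + 2) * (_j + 2), _j + 1)) == [13, 17, 21]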
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
| 0
|
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
A__ = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Any , __snake_case : str , __snake_case : bool , __snake_case : str = None , __snake_case : list = None ):
lowerCamelCase :Tuple = None
lowerCamelCase :Tuple = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
lowerCamelCase :Optional[int] = os.path.abspath('''examples''' )
for item in os.listdir(__snake_case ):
if item not in EXCLUDE_EXAMPLES:
lowerCamelCase :Optional[int] = os.path.join(__snake_case , __snake_case )
if os.path.isfile(__snake_case ) and ".py" in item_path:
with self.subTest(
tested_script=__snake_case , feature_script=__snake_case , tested_section='''main()''' if parser_only else '''training_function()''' , ):
lowerCamelCase :Union[str, Any] = compare_against_test(
os.path.join(__snake_case , __snake_case ) , __snake_case , __snake_case , __snake_case )
lowerCamelCase :int = '''\n'''.join(__snake_case )
if special_strings is not None:
for string in special_strings:
lowerCamelCase :int = diff.replace(__snake_case , '''''' )
self.assertEqual(__snake_case , '''''' )
def snake_case ( self : Dict ):
self.one_complete_example('''complete_nlp_example.py''' , __snake_case )
self.one_complete_example('''complete_nlp_example.py''' , __snake_case )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Union[str, Any] = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
lowerCamelCase :Optional[int] = [
''' ''' * 16 + '''{\n\n''',
''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 20 + '''"epoch": epoch,\n\n''',
''' ''' * 16 + '''},\n\n''',
''' ''' * 16 + '''step=epoch,\n''',
''' ''' * 12,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , __snake_case , __snake_case , __snake_case )
self.one_complete_example('''complete_cv_example.py''' , __snake_case , __snake_case , __snake_case )
@mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} )
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = False
@classmethod
def snake_case ( cls : Optional[Any] ):
super().setUpClass()
lowerCamelCase :Any = tempfile.mkdtemp()
lowerCamelCase :Optional[int] = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
lowerCamelCase :List[str] = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def snake_case ( cls : Dict ):
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def snake_case ( self : int ):
lowerCamelCase :Any = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def snake_case ( self : List[Any] ):
lowerCamelCase :Tuple = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n ".split()
lowerCamelCase :List[Any] = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def snake_case ( self : List[str] ):
lowerCamelCase :Dict = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}\n ".split()
lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case )
self.assertNotIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
def snake_case ( self : str ):
lowerCamelCase :List[Any] = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}\n ".split()
lowerCamelCase :Optional[int] = run_command(self._launch_args + testargs , return_stdout=__snake_case )
if torch.cuda.is_available():
lowerCamelCase :Union[str, Any] = torch.cuda.device_count()
else:
lowerCamelCase :Dict = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
else:
self.assertIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
@slow
def snake_case ( self : Any ):
lowerCamelCase :Tuple = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case )
lowerCamelCase :Tuple = re.findall('''({.+})''' , __snake_case )
lowerCamelCase :Optional[Any] = [r for r in results if '''accuracy''' in r][-1]
lowerCamelCase :List[str] = ast.literal_eval(__snake_case )
self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 )
def snake_case ( self : int ):
lowerCamelCase :Dict = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case ( self : Any ):
with tempfile.TemporaryDirectory() as tmpdir:
lowerCamelCase :Tuple = F"\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__snake_case , '''tracking''' ) ) )
def snake_case ( self : Tuple ):
lowerCamelCase :Tuple = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :int = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
| 706
|
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : str ):
lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :Optional[Any] = -1
lowerCamelCase :List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :str = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase :str = TextStreamer(__snake_case )
model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase :Optional[int] = cs.out[:-1]
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : Dict ):
lowerCamelCase :Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :int = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :List[Any] = -1
lowerCamelCase :Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :List[Any] = tokenizer.decode(greedy_ids[0] )
lowerCamelCase :List[str] = TextIteratorStreamer(__snake_case )
lowerCamelCase :List[str] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case )
thread.start()
lowerCamelCase :Any = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : str ):
lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :List[str] = -1
lowerCamelCase :Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Optional[Any] = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :List[str] = greedy_ids[:, input_ids.shape[1] :]
lowerCamelCase :Union[str, Any] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase :List[str] = TextStreamer(__snake_case , skip_prompt=__snake_case )
model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase :int = cs.out[:-1]
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : Optional[int] ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowerCamelCase :List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' )
lowerCamelCase :Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__snake_case )
lowerCamelCase :Optional[int] = -1
lowerCamelCase :Union[str, Any] = torch.ones((1, 5) , device=__snake_case ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowerCamelCase :Dict = TextStreamer(__snake_case , skip_special_tokens=__snake_case )
model.generate(__snake_case , max_new_tokens=1 , do_sample=__snake_case , streamer=__snake_case )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowerCamelCase :Tuple = cs.out[:-1] # Remove the final "\n"
lowerCamelCase :int = tokenizer(__snake_case , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def snake_case ( self : List[Any] ):
lowerCamelCase :List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :Optional[int] = -1
lowerCamelCase :Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :List[Any] = TextIteratorStreamer(__snake_case , timeout=0.0_0_1 )
lowerCamelCase :Dict = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__snake_case ):
lowerCamelCase :Dict = ''''''
for new_text in streamer:
streamer_text += new_text
| 49
| 0
|
def _lowerCamelCase ( a_ : int = 50_00_00_00):
lowerCamelCase :int = set()
lowerCamelCase :Union[str, Any] = int((limit - 24) ** (1 / 2))
lowerCamelCase :Optional[Any] = set(range(3 , prime_square_limit + 1 , 2))
primes.add(2)
for p in range(3 , prime_square_limit + 1 , 2):
if p not in primes:
continue
primes.difference_update(set(range(p * p , prime_square_limit + 1 , a_)))
for primea in primes:
lowerCamelCase :Any = primea * primea
for primea in primes:
lowerCamelCase :Union[str, Any] = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
lowerCamelCase :List[Any] = primea * primea * primea * primea
lowerCamelCase :Dict = square + cube + tetr
if total >= limit:
break
ret.add(a_)
return len(a_)
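# Tiny illustrative instance of the prime-power-triple sum computed above:
# with p = q = r = 2, each term is minimal, so 28 is the smallest expressible number.
assert 2 ** 2 + 2 ** 3 + 2 ** 4 == 28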
if __name__ == "__main__":
print(F'{solution() = }')
| 707
|
from maths.prime_factors import prime_factors
def _lowerCamelCase ( a_ : int):
if not isinstance(a_ , a_):
lowerCamelCase :Tuple = F"Input value of [number={number}] must be an integer"
raise TypeError(a_)
if number < 1:
raise ValueError('''Input must be a positive integer''')
return -1 if len(prime_factors(a_)) % 2 else 1
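# The function above appears to compute the Liouville function
# lambda(n) = (-1)**Omega(n), where Omega counts prime factors with
# multiplicity. Illustrative standalone check (names assumed):
def _demo_liouville(n: int) -> int:
    omega, d = 0, 2
    while d * d <= n:
        while n % d == 0:
            n //= d
            omega += 1
        d += 1
    if n > 1:
        omega += 1
    return -1 if omega % 2 else 1

assert [_demo_liouville(n) for n in (1, 2, 4, 12)] == [1, -1, 1, -1]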
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
| 0
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = ['image_processor', 'tokenizer']
_UpperCAmelCase = 'CLIPImageProcessor'
_UpperCAmelCase = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : List[Any] , __snake_case : Union[str, Any]=None , __snake_case : Tuple=None , **__snake_case : Dict ):
lowerCamelCase :int = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __snake_case , )
lowerCamelCase :Dict = kwargs.pop('''feature_extractor''' )
lowerCamelCase :Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__snake_case , __snake_case )
def __call__( self : Tuple , __snake_case : str=None , __snake_case : Dict=None , __snake_case : int=None , **__snake_case : str ):
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
lowerCamelCase :Dict = self.tokenizer(__snake_case , return_tensors=__snake_case , **__snake_case )
if images is not None:
lowerCamelCase :Union[str, Any] = self.image_processor(__snake_case , return_tensors=__snake_case , **__snake_case )
if text is not None and images is not None:
lowerCamelCase :Optional[Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__snake_case ) , tensor_type=__snake_case )
def snake_case ( self : int , *__snake_case : int , **__snake_case : List[Any] ):
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def snake_case ( self : Optional[int] , *__snake_case : int , **__snake_case : Any ):
return self.tokenizer.decode(*__snake_case , **__snake_case )
@property
def snake_case ( self : int ):
lowerCamelCase :int = self.tokenizer.model_input_names
lowerCamelCase :Optional[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def snake_case ( self : List[str] ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __snake_case , )
return self.image_processor_class
@property
def snake_case ( self : Dict ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __snake_case , )
return self.image_processor
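# Illustrative end-to-end usage of a processor like the one above. The
# checkpoint name is a public CLIP model, but loading it needs network access,
# so the sketch is guarded:
if False:  # flip to run manually
    from PIL import Image
    from transformers import CLIPProcessor
    _processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    _batch = _processor(text=["a photo of a cat"], images=Image.new("RGB", (224, 224)), return_tensors="pt")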
| 708
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
def _lowerCamelCase ( a_ : str , a_ : str=False):
lowerCamelCase :Optional[int] = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token'''))
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings'''))
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''))
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''))
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias'''))
for stage_idx in range(len(config.backbone_config.depths)):
for layer_idx in range(config.backbone_config.depths[stage_idx]):
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias"))
# transformer encoder
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight"))
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias"))
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight"))
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias"))
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight"))
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias"))
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
])
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCamelCase :List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
])
# fmt: on
return rename_keys
def _lowerCamelCase ( a_ : Any , a_ : Any , a_ : int=False):
for i in range(config.num_hidden_layers):
if base_model:
lowerCamelCase :Union[str, Any] = ''''''
else:
lowerCamelCase :Optional[int] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase :Optional[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.weight")
lowerCamelCase :Any = state_dict.pop(F"blocks.{i}.attn.qkv.bias")
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase :Any = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase :Tuple = in_proj_bias[: config.hidden_size]
lowerCamelCase :int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase :Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase :Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase :List[Any] = in_proj_bias[-config.hidden_size :]
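# Illustrative: timm fuses Q, K and V into one (3*hidden, hidden) matrix, and
# the slicing above peels them apart. Minimal shape check with assumed sizes:
_hidden = 4
_qkv = torch.arange(3 * _hidden * _hidden, dtype=torch.float32).reshape(3 * _hidden, _hidden)
_q, _k, _v = _qkv[:_hidden], _qkv[_hidden : 2 * _hidden], _qkv[-_hidden:]
assert _q.shape == _k.shape == _v.shape == (_hidden, _hidden)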
def _lowerCamelCase ( a_ : int):
lowerCamelCase :Any = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(a_ , a_)
def _lowerCamelCase ( a_ : int , a_ : Any , a_ : Tuple):
lowerCamelCase :Optional[Any] = dct.pop(a_)
lowerCamelCase :str = val
def _lowerCamelCase ( ):
lowerCamelCase :Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase :Tuple = Image.open(requests.get(a_ , stream=a_).raw)
return im
@torch.no_grad()
def _lowerCamelCase ( a_ : Optional[Any] , a_ : Optional[Any] , a_ : Optional[Any]=False):
lowerCamelCase :Optional[int] = BitConfig(
global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=a_ , )
lowerCamelCase :Optional[int] = ViTHybridConfig(backbone_config=a_ , image_size=3_84 , num_labels=10_00)
lowerCamelCase :List[Any] = False
# load original model from timm
lowerCamelCase :List[str] = timm.create_model(a_ , pretrained=a_)
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCamelCase :List[str] = timm_model.state_dict()
if base_model:
remove_classification_head_(a_)
lowerCamelCase :Tuple = create_rename_keys(a_ , a_)
for src, dest in rename_keys:
rename_key(a_ , a_ , a_)
read_in_q_k_v(a_ , a_ , a_)
lowerCamelCase :List[str] = '''huggingface/label-files'''
lowerCamelCase :Any = '''imagenet-1k-id2label.json'''
lowerCamelCase :List[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''') , '''r'''))
lowerCamelCase :Optional[Any] = {int(a_): v for k, v in idalabel.items()}
lowerCamelCase :Optional[int] = idalabel
lowerCamelCase :Union[str, Any] = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowerCamelCase :Optional[Any] = ViTHybridModel(a_).eval()
else:
lowerCamelCase :Dict = ViTHybridForImageClassification(a_).eval()
model.load_state_dict(a_)
# create image processor
lowerCamelCase :Dict = create_transform(**resolve_data_config({} , model=a_))
lowerCamelCase :str = transform.transforms
lowerCamelCase :int = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
lowerCamelCase :Any = ViTHybridImageProcessor(
do_resize=a_ , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=a_ , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=a_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
lowerCamelCase :Dict = prepare_img()
lowerCamelCase :str = transform(a_).unsqueeze(0)
lowerCamelCase :str = processor(a_ , return_tensors='''pt''').pixel_values
# verify pixel values
assert torch.allclose(a_ , a_)
# verify logits
with torch.no_grad():
lowerCamelCase :Optional[int] = model(a_)
lowerCamelCase :Union[str, Any] = outputs.logits
print('''Predicted class:''' , logits.argmax(-1).item())
if base_model:
lowerCamelCase :Union[str, Any] = timm_model.forward_features(a_)
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(a_ , outputs.pooler_output , atol=1e-3)
else:
lowerCamelCase :List[str] = timm_model(a_)
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(a_ , outputs.logits , atol=1e-3)
print('''Looks ok!''')
if pytorch_dump_folder_path is not None:
Path(a_).mkdir(exist_ok=a_)
print(F"Saving model {vit_name} to {pytorch_dump_folder_path}")
model.save_pretrained(a_)
print(F"Saving processor to {pytorch_dump_folder_path}")
processor.save_pretrained(a_)
if push_to_hub:
print(F"Pushing model and processor to the hub {vit_name}")
model.push_to_hub(F"ybelkada/{vit_name}")
processor.push_to_hub(F"ybelkada/{vit_name}")
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
A__ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 49
| 0
|
from __future__ import annotations
def _lowerCamelCase ( a_ : list[int] , a_ : int):
if len(a_) == 0:
return False
lowerCamelCase :Optional[int] = len(a_) // 2
if a_list[midpoint] == item:
return True
if item < a_list[midpoint]:
return binary_search(a_list[:midpoint] , a_)
else:
return binary_search(a_list[midpoint + 1 :] , a_)
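# Illustrative iterative variant (names assumed): avoids the O(n) list slicing
# the recursive version above performs on every call.
def _demo_binary_search(items: list[int], target: int) -> bool:
    lo, hi = 0, len(items) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if items[mid] == target:
            return True
        if target < items[mid]:
            hi = mid - 1
        else:
            lo = mid + 1
    return False

assert _demo_binary_search([1, 3, 5, 7], 5)
assert not _demo_binary_search([1, 3, 5, 7], 4)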
if __name__ == "__main__":
_UpperCamelCase = input("""Enter numbers separated by comma:\n""").strip()
_UpperCamelCase = [int(item.strip()) for item in user_input.split(""",""")]
_UpperCamelCase = int(input("""Enter the number to be found in the list:\n""").strip())
_UpperCamelCase = """""" if binary_search(sequence, target) else """not """
print(F'{target} was {not_str}found in {sequence}')
| 709
|
def _lowerCamelCase ( a_ : int = 4_00_00_00):
lowerCamelCase :Dict = [0, 1]
lowerCamelCase :Optional[Any] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1])
if fib[i + 2] > n:
break
i += 1
lowerCamelCase :Dict = 0
for j in range(len(a_) - 1):
if fib[j] % 2 == 0:
total += fib[j]
return total
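# Constant-memory variant of the even-Fibonacci sum above (illustrative):
# sums even terms without materialising the whole sequence.
def _demo_even_fib_sum(n: int) -> int:
    a, b, total = 0, 1, 0
    while a <= n:
        if a % 2 == 0:
            total += a
        a, b = b, a + b
    return total

assert _demo_even_fib_sum(100) == 2 + 8 + 34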
if __name__ == "__main__":
print(F'{solution() = }')
| 49
| 0
|
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")
        self.assertIsInstance(act, nn.Mish)
        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")
        self.assertIsInstance(act, nn.GELU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
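# Usage sketch (illustrative, not part of the test file): get_activation maps a string
# name to an nn.Module instance, e.g.
#     act = get_activation("gelu")
#     y = act(torch.linspace(-2.0, 2.0, steps=5))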
| 710
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
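# Usage sketch (illustrative, not part of the original file): with the lazy module in
# place, `from transformers.models.nllb_moe import NllbMoeConfig` resolves the class on
# first attribute access without eagerly importing the torch-heavy modeling code.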
| 49
| 0
|
def generate_large_matrix() -> list[list[int]]:
    return [list(range(10_00 - i, -10_00 - i, -1)) for i in range(10_00)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
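# Quick check (illustrative, not from the original source): [[7, 7, 6], [-1, -2, -3]]
# contains three negatives, and all three implementations agree:
#     >>> count_negatives_binary_search([[7, 7, 6], [-1, -2, -3]])
#     3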
def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=5_00)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 711
|
import numpy
class _lowerCAmelCase :
def __init__( self : Dict , __snake_case : numpy.ndarray , __snake_case : numpy.ndarray ):
lowerCamelCase :Dict = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
lowerCamelCase :Dict = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
lowerCamelCase :Dict = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
lowerCamelCase :Any = numpy.random.rand(3 , 1 )
# Real output values provided.
lowerCamelCase :Union[str, Any] = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
lowerCamelCase :List[str] = numpy.zeros(output_array.shape )
def snake_case ( self : Optional[int] ):
lowerCamelCase :Any = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
lowerCamelCase :Any = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
lowerCamelCase :Dict = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def snake_case ( self : Any ):
lowerCamelCase :Union[str, Any] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
lowerCamelCase :Dict = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
lowerCamelCase :int = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def snake_case ( self : Dict , __snake_case : numpy.ndarray , __snake_case : int , __snake_case : bool ):
for iteration in range(1 , iterations + 1 ):
lowerCamelCase :Union[str, Any] = self.feedforward()
self.back_propagation()
if give_loss:
lowerCamelCase :Tuple = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F"Iteration {iteration} Loss: {loss}" )
def snake_case ( self : Optional[int] , __snake_case : numpy.ndarray ):
lowerCamelCase :int = input_arr
lowerCamelCase :Union[str, Any] = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
lowerCamelCase :Optional[Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
lowerCamelCase :Optional[int] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))
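# Quick check (illustrative, not from the original source): sigmoid(0) is 0.5, and the
# derivative helper expects an already-activated value:
#     >>> sigmoid(numpy.float64(0))
#     0.5
#     >>> sigmoid_derivative(sigmoid(numpy.float64(0)))
#     0.25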
def _lowerCamelCase ( ):
lowerCamelCase :Optional[Any] = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
lowerCamelCase :int = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa)
# Calling neural network class.
lowerCamelCase :List[Any] = TwoHiddenLayerNeuralNetwork(
input_array=a_ , output_array=a_)
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=a_ , iterations=10 , give_loss=a_)
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa))
if __name__ == "__main__":
example()
| 49
| 0
|
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
A__ = """platform"""
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
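# Note (illustrative, not part of the original file): every key in the returned dict
# matches a keyword argument of the Flax model's __call__, so tests can splat it:
#     outputs = model(**prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids))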
class FlaxBlenderbotSmallModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
def snake_case ( self : str ):
lowerCamelCase :str = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
lowerCamelCase :Union[str, Any] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
lowerCamelCase :List[Any] = shift_tokens_right(__snake_case , 1 , 2 )
lowerCamelCase :Optional[int] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=__snake_case , )
lowerCamelCase :Dict = prepare_blenderbot_inputs_dict(__snake_case , __snake_case , __snake_case )
return config, inputs_dict
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Optional[int] = self.prepare_config_and_inputs()
return config, inputs_dict
def snake_case ( self : List[str] , __snake_case : Tuple , __snake_case : Optional[int] , __snake_case : Union[str, Any] ):
lowerCamelCase :Tuple = 20
lowerCamelCase :Union[str, Any] = model_class_name(__snake_case )
lowerCamelCase :Optional[Any] = model.encode(inputs_dict['''input_ids'''] )
lowerCamelCase :Dict = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowerCamelCase :Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , __snake_case , __snake_case )
lowerCamelCase :Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
lowerCamelCase :int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCamelCase :Optional[Any] = model.decode(
decoder_input_ids[:, :-1] , __snake_case , decoder_attention_mask=__snake_case , past_key_values=__snake_case , decoder_position_ids=__snake_case , )
lowerCamelCase :Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowerCamelCase :str = model.decode(
decoder_input_ids[:, -1:] , __snake_case , decoder_attention_mask=__snake_case , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__snake_case , )
lowerCamelCase :Optional[int] = model.decode(__snake_case , __snake_case )
lowerCamelCase :Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F"Max diff is {diff}" )
def snake_case ( self : Optional[int] , __snake_case : List[str] , __snake_case : List[Any] , __snake_case : Tuple ):
lowerCamelCase :List[str] = 20
lowerCamelCase :Union[str, Any] = model_class_name(__snake_case )
lowerCamelCase :Any = model.encode(inputs_dict['''input_ids'''] )
lowerCamelCase :Optional[int] = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowerCamelCase :str = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
lowerCamelCase :Optional[int] = model.init_cache(decoder_input_ids.shape[0] , __snake_case , __snake_case )
lowerCamelCase :Any = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCamelCase :int = model.decode(
decoder_input_ids[:, :-1] , __snake_case , decoder_attention_mask=__snake_case , past_key_values=__snake_case , decoder_position_ids=__snake_case , )
lowerCamelCase :Union[str, Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowerCamelCase :int = model.decode(
decoder_input_ids[:, -1:] , __snake_case , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__snake_case , decoder_position_ids=__snake_case , )
lowerCamelCase :str = model.decode(__snake_case , __snake_case , decoder_attention_mask=__snake_case )
lowerCamelCase :Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F"Max diff is {diff}" )
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
_UpperCAmelCase = 9_9
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Dict = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
lowerCamelCase :Union[str, Any] = input_ids.shape[0]
lowerCamelCase :Any = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def snake_case ( self : List[str] ):
lowerCamelCase :List[Any] = self._get_config_and_data()
lowerCamelCase :str = FlaxBlenderbotSmallForConditionalGeneration(__snake_case )
lowerCamelCase :Any = lm_model(input_ids=__snake_case )
lowerCamelCase :Union[str, Any] = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , __snake_case )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :int = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
lowerCamelCase :Optional[int] = FlaxBlenderbotSmallForConditionalGeneration(__snake_case )
lowerCamelCase :List[Any] = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
lowerCamelCase :List[Any] = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
lowerCamelCase :List[str] = lm_model(input_ids=__snake_case , decoder_input_ids=__snake_case )
lowerCamelCase :Tuple = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , __snake_case )
def snake_case ( self : Dict ):
lowerCamelCase :Optional[Any] = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
lowerCamelCase :Union[str, Any] = shift_tokens_right(__snake_case , 1 , 2 )
lowerCamelCase :Dict = np.equal(__snake_case , 1 ).astype(np.floataa ).sum()
lowerCamelCase :str = np.equal(__snake_case , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(__snake_case , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase , __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = True
_UpperCAmelCase = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
_UpperCAmelCase = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotSmallModelTester(self)
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Any = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__snake_case , __snake_case , __snake_case )
def snake_case ( self : str ):
lowerCamelCase :Any = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__snake_case , __snake_case , __snake_case )
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCamelCase :Tuple = self._prepare_for_class(__snake_case , __snake_case )
lowerCamelCase :Optional[int] = model_class(__snake_case )
@jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)
with self.subTest('''JIT Enabled''' ):
lowerCamelCase :Optional[int] = encode_jitted(**__snake_case ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowerCamelCase :Any = encode_jitted(**__snake_case ).to_tuple()
self.assertEqual(len(__snake_case ) , len(__snake_case ) )
for jitted_output, output in zip(__snake_case , __snake_case ):
self.assertEqual(jitted_output.shape , output.shape )
def snake_case ( self : int ):
lowerCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCamelCase :Tuple = model_class(__snake_case )
lowerCamelCase :Dict = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
lowerCamelCase :Tuple = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs,
                    )
with self.subTest('''JIT Enabled''' ):
lowerCamelCase :Tuple = decode_jitted(**__snake_case ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowerCamelCase :Optional[Any] = decode_jitted(**__snake_case ).to_tuple()
self.assertEqual(len(__snake_case ) , len(__snake_case ) )
for jitted_output, output in zip(__snake_case , __snake_case ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def snake_case ( self : List[str] ):
for model_class_name in self.all_model_classes:
lowerCamelCase :Optional[Any] = model_class_name.from_pretrained('''facebook/blenderbot_small-90M''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
lowerCamelCase :Optional[int] = np.ones((1, 1) ) * model.config.eos_token_id
lowerCamelCase :Union[str, Any] = model(__snake_case )
self.assertIsNotNone(__snake_case )
| 712
|
def abbr(a: str, b: str) -> bool:
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
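# Quick check (illustrative, not from the original source): "daBcd" becomes "ABC" by
# deleting the outer 'd's and capitalising 'a' and 'c', while "dBcd" cannot:
#     >>> abbr("daBcd", "ABC")
#     True
#     >>> abbr("dBcd", "ABC")
#     False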
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A__ = {"""processing_layoutxlm""": ["""LayoutXLMProcessor"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""LayoutXLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""LayoutXLMTokenizerFast"""]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 713
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(self, parent, batch_size=13, image_size=[30, 30], patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, n_targets=8, num_detection_tokens=10):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens
def snake_case ( self : List[str] ):
lowerCamelCase :str = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowerCamelCase :List[str] = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowerCamelCase :Optional[int] = []
for i in range(self.batch_size ):
lowerCamelCase :List[str] = {}
lowerCamelCase :Tuple = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=__snake_case )
lowerCamelCase :List[str] = torch.rand(self.n_targets , 4 , device=__snake_case )
labels.append(__snake_case )
lowerCamelCase :str = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Union[str, Any] ):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__snake_case , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def snake_case ( self : Tuple , __snake_case : Tuple , __snake_case : Tuple , __snake_case : Any ):
lowerCamelCase :Optional[Any] = YolosModel(config=__snake_case )
model.to(__snake_case )
model.eval()
lowerCamelCase :Union[str, Any] = model(__snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def snake_case ( self : Dict , __snake_case : str , __snake_case : Optional[int] , __snake_case : Optional[Any] ):
lowerCamelCase :int = YolosForObjectDetection(__snake_case )
model.to(__snake_case )
model.eval()
lowerCamelCase :str = model(pixel_values=__snake_case )
lowerCamelCase :Any = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
lowerCamelCase :int = model(pixel_values=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def snake_case ( self : int ):
lowerCamelCase :List[Any] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase :str = config_and_inputs
lowerCamelCase :Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
_UpperCAmelCase = (
{'feature-extraction': YolosModel, 'object-detection': YolosForObjectDetection} if is_torch_available() else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def snake_case ( self : Any , __snake_case : List[Any] , __snake_case : Tuple , __snake_case : Dict=False ):
lowerCamelCase :Optional[int] = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
lowerCamelCase :Dict = []
for i in range(self.model_tester.batch_size ):
lowerCamelCase :Optional[Any] = {}
lowerCamelCase :List[Any] = torch.ones(
size=(self.model_tester.n_targets,) , device=__snake_case , dtype=torch.long )
lowerCamelCase :str = torch.ones(
self.model_tester.n_targets , 4 , device=__snake_case , dtype=torch.float )
labels.append(__snake_case )
lowerCamelCase :Union[str, Any] = labels
return inputs_dict
    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)
def snake_case ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def snake_case ( self : Optional[Any] ):
# YOLOS does not use inputs_embeds
pass
def snake_case ( self : Tuple ):
lowerCamelCase , lowerCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :Optional[int] = model_class(__snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase :str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) )
def snake_case ( self : str ):
lowerCamelCase , lowerCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :str = model_class(__snake_case )
lowerCamelCase :Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase :Tuple = [*signature.parameters.keys()]
lowerCamelCase :Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __snake_case )
def snake_case ( self : int ):
lowerCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def snake_case ( self : str ):
lowerCamelCase , lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase :int = True
# in YOLOS, the seq_len is different
lowerCamelCase :str = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowerCamelCase :str = True
lowerCamelCase :Tuple = False
lowerCamelCase :Optional[int] = True
lowerCamelCase :int = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :str = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :str = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase :Optional[Any] = True
lowerCamelCase :str = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :Tuple = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Tuple = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowerCamelCase :Optional[int] = len(__snake_case )
# Check attention is always last and order is fine
lowerCamelCase :Union[str, Any] = True
lowerCamelCase :List[Any] = True
lowerCamelCase :Tuple = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :int = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Dict = 1
self.assertEqual(out_len + added_hidden_states , len(__snake_case ) )
lowerCamelCase :Dict = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def snake_case ( self : List[str] ):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)
            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size],
            )
lowerCamelCase , lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :Union[str, Any] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase :Any = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*__snake_case )
@slow
def snake_case ( self : Dict ):
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase :Tuple = YolosModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
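# Note (an assumption for context, not in the original): 000000039769.png is the COCO
# fixture image of two cats that HF vision tests conventionally load via prepare_img().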
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def snake_case ( self : Tuple ):
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def snake_case ( self : Dict ):
lowerCamelCase :Union[str, Any] = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(__snake_case )
lowerCamelCase :Optional[Any] = self.default_image_processor
lowerCamelCase :str = prepare_img()
lowerCamelCase :Dict = image_processor(images=__snake_case , return_tensors='''pt''' ).to(__snake_case )
# forward pass
with torch.no_grad():
lowerCamelCase :Optional[Any] = model(inputs.pixel_values )
# verify outputs
lowerCamelCase :int = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , __snake_case )
lowerCamelCase :Any = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=__snake_case , )
lowerCamelCase :Any = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __snake_case , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , __snake_case , atol=1e-4 ) )
# verify postprocessing
lowerCamelCase :List[str] = image_processor.post_process_object_detection(
__snake_case , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
lowerCamelCase :List[str] = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(__snake_case )
lowerCamelCase :str = [75, 75, 17, 63, 17]
lowerCamelCase :Tuple = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(__snake_case )
self.assertEqual(len(results['''scores'''] ) , 5 )
self.assertTrue(torch.allclose(results['''scores'''] , __snake_case , atol=1e-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist() , __snake_case )
self.assertTrue(torch.allclose(results['''boxes'''][0, :] , __snake_case ) )
| 49
| 0
|
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
lowerCamelCase :Optional[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __snake_case , )
lowerCamelCase :List[Any] = kwargs.pop('''feature_extractor''' )
lowerCamelCase :List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
lowerCamelCase :Dict = tokenizer
lowerCamelCase :Any = AutoTokenizer.from_pretrained('''gpt2''' )
lowerCamelCase :Optional[int] = AutoTokenizer.from_pretrained('''bert-base-uncased''' )
super().__init__(__snake_case , __snake_case )
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
lowerCamelCase :List[str] = self.image_processor(__snake_case , return_tensors=__snake_case , **__snake_case )
if text is not None:
lowerCamelCase :Any = self.char_tokenizer(__snake_case , return_tensors=__snake_case , **__snake_case )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowerCamelCase :Tuple = encodings['''input_ids''']
return inputs
def snake_case ( self : Optional[int] , __snake_case : Tuple ):
lowerCamelCase :Any = sequences
lowerCamelCase :Union[str, Any] = char_preds.size(0 )
lowerCamelCase :Tuple = self._decode_helper(__snake_case , '''char''' )
lowerCamelCase :Tuple = self._decode_helper(__snake_case , '''bpe''' )
lowerCamelCase :Optional[Any] = self._decode_helper(__snake_case , '''wp''' )
lowerCamelCase :Optional[Any] = []
lowerCamelCase :Dict = []
for i in range(__snake_case ):
lowerCamelCase :Optional[int] = [char_scores[i], bpe_scores[i], wp_scores[i]]
lowerCamelCase :Optional[int] = [char_strs[i], bpe_strs[i], wp_strs[i]]
lowerCamelCase :Dict = scores.index(max(__snake_case ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
lowerCamelCase :Tuple = {}
lowerCamelCase :int = final_strs
lowerCamelCase :str = final_scores
lowerCamelCase :Dict = char_strs
lowerCamelCase :Optional[int] = bpe_strs
lowerCamelCase :str = wp_strs
return out
    def _decode_helper(self, pred_logits, format):
if format == DecodeType.CHARACTER:
lowerCamelCase :List[Any] = self.char_decode
lowerCamelCase :List[Any] = 1
lowerCamelCase :Tuple = '''[s]'''
elif format == DecodeType.BPE:
lowerCamelCase :Tuple = self.bpe_decode
lowerCamelCase :Dict = 2
lowerCamelCase :str = '''#'''
elif format == DecodeType.WORDPIECE:
lowerCamelCase :Dict = self.wp_decode
lowerCamelCase :List[Any] = 102
lowerCamelCase :List[Any] = '''[SEP]'''
else:
raise ValueError(F"Format {format} is not supported." )
        dec_strs, conf_scores = [], []
lowerCamelCase :Dict = pred_logits.size(0 )
lowerCamelCase :Any = pred_logits.size(1 )
lowerCamelCase :Dict = pred_logits.topk(1 , dim=-1 , largest=__snake_case , sorted=__snake_case )
lowerCamelCase :Optional[Any] = preds_index.view(-1 , __snake_case )[:, 1:]
lowerCamelCase :int = decoder(__snake_case )
lowerCamelCase :Optional[int] = torch.nn.functional.softmax(__snake_case , dim=2 ).max(dim=2 )
lowerCamelCase :Optional[Any] = preds_max_prob[:, 1:]
for index in range(__snake_case ):
lowerCamelCase :str = preds_str[index].find(__snake_case )
lowerCamelCase :List[Any] = preds_str[index][:pred_eos]
lowerCamelCase :Union[str, Any] = preds_index[index].cpu().tolist()
lowerCamelCase :Dict = pred_index.index(__snake_case ) if eos_token in pred_index else -1
lowerCamelCase :Optional[Any] = preds_max_prob[index][: pred_eos_index + 1]
lowerCamelCase :Any = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(__snake_case )
conf_scores.append(__snake_case )
return dec_strs, conf_scores
    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
| 714
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class _lowerCAmelCase ( unittest.TestCase ):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401
        self.test_metrics = test_metrics
@require_cpu
    def test_metric_cpu_noop(self):
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
    def test_metric_cpu_multi(self):
debug_launcher(self.test_metrics.main )
@require_single_gpu
    def test_metric_gpu(self):
self.test_metrics.main()
@require_multi_gpu
    def test_metric_gpu_multi(self):
print(F"Found {torch.cuda.device_count()} devices." )
lowerCamelCase :Optional[int] = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__snake_case , env=os.environ.copy() )
| 49
| 0
|
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in GitHub format with all the information that needs to be shared
# with others, and a second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second has 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the baseline to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
A__ = float("""nan""")
class Tee:
    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
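# Usage sketch (illustrative): redirect stdout so everything printed also lands in the
# benchmark report file, as main() does below:
#     sys.stdout = Tee("benchmark-report.txt")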
def get_original_command(max_width=80, full_python_path=False):
    cmd = []
    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")
    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)
    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))
    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"
    return [sys.executable] + shlex.split(args.base_cmd)
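# Design note (inferred from the code above): the caller's --output_dir is stripped and
# replaced with our own so that each run writes all_results.json exactly where
# process_run_single() reads it back from.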
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
# Enable to debug everything but the run itself, to do it fast and see the progress.
# This is useful for debugging the output formatting quickly - we can remove it later once
# everybody is happy with the output
if 0:
import random
from time import sleep
sleep(0)
return dict(
{k: random.uniform(0 , 1_00) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6_666, 222.22_222_222])} , )
lowerCamelCase :Union[str, Any] = subprocess.run(a_ , capture_output=a_ , text=a_)
if verbose:
print('''STDOUT''' , result.stdout)
print('''STDERR''' , result.stderr)
# save the streams
lowerCamelCase :Dict = variation.replace(''' ''' , '''-''')
with open(Path(a_) / F"log.{prefix}.stdout.txt" , '''w''') as f:
f.write(result.stdout)
with open(Path(a_) / F"log.{prefix}.stderr.txt" , '''w''') as f:
f.write(result.stderr)
if result.returncode != 0:
if verbose:
print('''failed''')
return {target_metric_key: nan}
with io.open(F"{output_dir}/all_results.json" , '''r''' , encoding='''utf-8''') as f:
lowerCamelCase :List[str] = json.load(a_)
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(id, cmd, variation_key, variation, longest_variation_len, target_metric_key, report_metric_keys, repeat_times, output_dir, verbose):
lowerCamelCase :Dict = []
lowerCamelCase :List[Any] = []
lowerCamelCase :Any = F"{id}: {variation:<{longest_variation_len}}"
lowerCamelCase :int = F"{preamble}: "
lowerCamelCase :str = set(report_metric_keys + [target_metric_key])
for i in tqdm(range(a_) , desc=a_ , leave=a_):
lowerCamelCase :Union[str, Any] = process_run_single(
a_ , a_ , a_ , a_ , a_ , a_ , a_)
lowerCamelCase :Optional[Any] = single_run_metrics[target_metric_key]
if not math.isnan(a_):
metrics.append(a_)
results.append(a_)
outcome += "✓"
else:
outcome += "✘"
lowerCamelCase :List[str] = F"\33[2K\r{outcome}"
if len(a_) > 0:
lowerCamelCase :Tuple = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
lowerCamelCase :Union[str, Any] = round(mean_metrics[target_metric_key] , 2)
lowerCamelCase :Optional[int] = F"{outcome} {mean_target}"
if len(a_) > 1:
results_str += F" {tuple(round(a_ , 2) for x in results)}"
print(a_)
lowerCamelCase :Union[str, Any] = variation
return mean_metrics
else:
print(a_)
return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return F"\nDatetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n"
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
lowerCamelCase :Any = pd.DataFrame(a_)
lowerCamelCase :Any = '''variation'''
lowerCamelCase :List[Any] = '''diff_%'''
lowerCamelCase :Any = nan
if base_variation is not None and len(df[df[variation_key] == base_variation]):
# this may still return nan
lowerCamelCase :Tuple = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(a_):
# as a fallback, use the minimal value as the sentinel
lowerCamelCase :Optional[int] = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(a_):
lowerCamelCase :Dict = df.apply(
lambda a_: round(1_00 * (r[target_metric_key] - sentinel_value) / sentinel_value)
if not math.isnan(r[target_metric_key])
else 0 , axis='''columns''' , )
# re-order columns
lowerCamelCase :Tuple = [variation_key, target_metric_key, diff_key, *report_metric_keys]
lowerCamelCase :Dict = df.reindex(a_ , axis='''columns''') # reorder cols
# capitalize
lowerCamelCase :List[str] = df.rename(str.capitalize , axis='''columns''')
# make the cols as narrow as possible
lowerCamelCase :Optional[Any] = df.rename(lambda a_: c.replace('''_''' , '''<br>''') , axis='''columns''')
lowerCamelCase :Union[str, Any] = df.rename(lambda a_: c.replace('''_''' , '''\n''') , axis='''columns''')
lowerCamelCase :Any = ['''''', '''Copy between the cut-here-lines and paste as is to github or a forum''']
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=a_ , floatfmt='''.2f''')]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=a_ , floatfmt='''.2f''')]
print('''\n\n'''.join(a_))
def _lowerCamelCase ( ):
lowerCamelCase :Dict = argparse.ArgumentParser()
parser.add_argument(
'''--base-cmd''' , default=a_ , type=a_ , required=a_ , help='''Base cmd''' , )
parser.add_argument(
'''--variations''' , default=a_ , type=a_ , nargs='''+''' , required=a_ , help='''Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'''' , )
parser.add_argument(
'''--base-variation''' , default=a_ , type=a_ , help='''Baseline variation to compare to. if None the minimal target value will be used to compare against''' , )
parser.add_argument(
'''--target-metric-key''' , default=a_ , type=a_ , required=a_ , help='''Target metric key in output_dir/all_results.json, e.g., train_samples_per_second''' , )
parser.add_argument(
'''--report-metric-keys''' , default='''''' , type=a_ , help='''Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples''' , )
parser.add_argument(
'''--repeat-times''' , default=1 , type=a_ , help='''How many times to re-run each variation - an average will be reported''' , )
parser.add_argument(
'''--output_dir''' , default='''output_benchmark''' , type=a_ , help='''The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked''' , )
parser.add_argument(
'''--verbose''' , default=a_ , action='''store_true''' , help='''Whether to show the outputs of each run or just the benchmark progress''' , )
lowerCamelCase :Optional[Any] = parser.parse_args()
lowerCamelCase :List[Any] = args.output_dir
Path(a_).mkdir(exist_ok=a_)
lowerCamelCase :List[str] = get_base_command(a_ , a_)
# split each dimension into its --foo variations
lowerCamelCase :int = [list(map(str.strip , re.split(R'''\|''' , x))) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
lowerCamelCase :Optional[int] = list(map(str.strip , map(''' '''.join , itertools.product(*a_))))
lowerCamelCase :Tuple = max(len(x) for x in variations)
# split wanted keys
lowerCamelCase :Tuple = args.report_metric_keys.split()
# capture prints into a log file for convenience
lowerCamelCase :Dict = F"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
print(F"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
print(F"and this script's output is also piped into {report_fn}")
lowerCamelCase :Optional[Any] = Tee(a_)
print(F"\n*** Running {len(a_)} benchmarks:")
print(F"Base command: {' '.join(a_)}")
lowerCamelCase :Dict = '''variation'''
lowerCamelCase :Optional[Any] = []
for id, variation in enumerate(tqdm(a_ , desc='''Total completion: ''' , leave=a_)):
lowerCamelCase :int = base_cmd + variation.split()
results.append(
process_run(
id + 1 , a_ , a_ , a_ , a_ , args.target_metric_key , a_ , args.repeat_times , a_ , args.verbose , ))
process_results(a_ , args.target_metric_key , a_ , args.base_variation , a_)
if __name__ == "__main__":
main()
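# Hypothetical invocation of the benchmark runner above (the script name and the
# base command are placeholders, not from the original source); the variation
# syntax follows the --variations help text:
#
#   python benchmark.py \
#       --base-cmd "python run_translation.py --output_dir tmp" \
#       --variations '|--fp16|--bf16' '|--tf32' \
#       --target-metric-key train_samples_per_second \
#       --repeat-times 2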
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = ''
_UpperCAmelCase = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
_UpperCAmelCase = None # compression type in fsspec. ex: "gzip"
_UpperCAmelCase = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self : str , __snake_case : str = "" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , **__snake_case : Dict ):
super().__init__(self , **__snake_case )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
lowerCamelCase :Optional[Any] = fsspec.open(
__snake_case , mode='''rb''' , protocol=__snake_case , compression=self.compression , client_kwargs={
'''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459
'''trust_env''': True, # Enable reading proxy env variables.
**(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
lowerCamelCase :List[str] = os.path.basename(self.file.path.split('''::''' )[0] )
lowerCamelCase :Dict = (
self.compressed_name[: self.compressed_name.rindex('''.''' )]
if '''.''' in self.compressed_name
else self.compressed_name
)
lowerCamelCase :List[str] = None
@classmethod
def snake_case ( cls : Any , __snake_case : Any ):
# compressed file paths are always relative to the archive root
return super()._strip_protocol(__snake_case ).lstrip('''/''' )
def snake_case ( self : Any ):
if self.dir_cache is None:
lowerCamelCase :Optional[Any] = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
lowerCamelCase :Optional[Any] = {f['''name''']: f}
def snake_case ( self : Union[str, Any] , __snake_case : str ):
return self.file.open().read()
def snake_case ( self : Optional[int] , __snake_case : str , __snake_case : str = "rb" , __snake_case : int=None , __snake_case : Optional[int]=True , __snake_case : str=None , **__snake_case : str , ):
lowerCamelCase :List[str] = self._strip_protocol(__snake_case )
if mode != "rb":
raise ValueError(F"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'" )
return self.file.open()
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'bz2'
_UpperCAmelCase = 'bz2'
_UpperCAmelCase = '.bz2'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'gzip'
_UpperCAmelCase = 'gzip'
_UpperCAmelCase = '.gz'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'lz4'
_UpperCAmelCase = 'lz4'
_UpperCAmelCase = '.lz4'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'xz'
_UpperCAmelCase = 'xz'
_UpperCAmelCase = '.xz'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'zstd'
_UpperCAmelCase = 'zstd'
_UpperCAmelCase = '.zst'
def __init__( self : str , __snake_case : str , __snake_case : str = "rb" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , __snake_case : int = DEFAULT_BLOCK_SIZE , **__snake_case : int , ):
super().__init__(
fo=__snake_case , mode=__snake_case , target_protocol=__snake_case , target_options=__snake_case , block_size=__snake_case , **__snake_case , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
lowerCamelCase :Tuple = self.file.__enter__
class _lowerCAmelCase :
def __init__( self : Dict , __snake_case : Tuple ):
lowerCamelCase :Optional[int] = file_
def __enter__( self : Optional[int] ):
self._file.__enter__()
return self
def __exit__( self : str , *__snake_case : Optional[Any] , **__snake_case : List[Any] ):
self._file.__exit__(*__snake_case , **__snake_case )
def __iter__( self : Optional[Any] ):
return iter(self._file )
def snake_case ( self : List[Any] ):
return next(self._file )
def __getattr__( self : Any , __snake_case : str ):
return getattr(self._file , __snake_case )
def fixed_enter(*__snake_case : Optional[int] , **__snake_case : str ):
return WrappedFile(_enter(*__snake_case , **__snake_case ) )
lowerCamelCase :Dict = fixed_enter
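# Usage sketch for the compression filesystems above, mirroring the chained-URL
# form given in the protocol comment ("gzip://file.txt::http://foo.bar/file.txt.gz");
# the URL below is a placeholder, not a real endpoint.
def _read_gzipped_example() -> bytes:
    with fsspec.open('''gzip://file.txt::http://foo.bar/file.txt.gz''' , mode='''rb''' ) as f:
        return f.read()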
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"""google/pegasus-large""": """https://huggingface.co/google/pegasus-large/resolve/main/config.json""",
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'pegasus'
_UpperCAmelCase = ['past_key_values']
_UpperCAmelCase = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : List[Any] , __snake_case : List[Any]=50265 , __snake_case : Any=1024 , __snake_case : Tuple=12 , __snake_case : Union[str, Any]=4096 , __snake_case : List[str]=16 , __snake_case : Union[str, Any]=12 , __snake_case : List[Any]=4096 , __snake_case : Any=16 , __snake_case : Optional[int]=0.0 , __snake_case : List[str]=0.0 , __snake_case : Dict=True , __snake_case : Union[str, Any]=True , __snake_case : Tuple="gelu" , __snake_case : Union[str, Any]=1024 , __snake_case : List[Any]=0.1 , __snake_case : Optional[Any]=0.0 , __snake_case : Any=0.0 , __snake_case : int=0.0_2 , __snake_case : Optional[int]=0 , __snake_case : Union[str, Any]=False , __snake_case : Any=0 , __snake_case : Any=1 , __snake_case : List[Any]=1 , **__snake_case : Tuple , ):
lowerCamelCase :Any = vocab_size
lowerCamelCase :Tuple = max_position_embeddings
lowerCamelCase :Optional[int] = d_model
lowerCamelCase :int = encoder_ffn_dim
lowerCamelCase :Union[str, Any] = encoder_layers
lowerCamelCase :Tuple = encoder_attention_heads
lowerCamelCase :List[str] = decoder_ffn_dim
lowerCamelCase :Dict = decoder_layers
lowerCamelCase :Tuple = decoder_attention_heads
lowerCamelCase :List[Any] = dropout
lowerCamelCase :List[Any] = attention_dropout
lowerCamelCase :int = activation_dropout
lowerCamelCase :str = activation_function
lowerCamelCase :str = init_std
lowerCamelCase :Any = encoder_layerdrop
lowerCamelCase :Dict = decoder_layerdrop
lowerCamelCase :Union[str, Any] = use_cache
lowerCamelCase :int = encoder_layers
lowerCamelCase :Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__snake_case , eos_token_id=__snake_case , is_encoder_decoder=__snake_case , decoder_start_token_id=__snake_case , forced_eos_token_id=__snake_case , **__snake_case , )
@property
def snake_case ( self : Optional[Any] ):
return self.encoder_attention_heads
@property
def snake_case ( self : Union[str, Any] ):
return self.d_model
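# Sketch, assuming the published transformers API that this configuration
# mirrors: the attribute_map above lets the generic names resolve to the
# Pegasus-specific fields.
def _pegasus_config_demo() -> None:
    from transformers import PegasusConfig

    cfg = PegasusConfig(d_model=512 , encoder_attention_heads=8 )
    assert cfg.hidden_size == 512  # mapped to d_model
    assert cfg.num_attention_heads == 8  # mapped to encoder_attention_heads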
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = LEDTokenizer
_UpperCAmelCase = LEDTokenizerFast
_UpperCAmelCase = True
def snake_case ( self : Any ):
super().setUp()
lowerCamelCase :Optional[int] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowerCamelCase :Any = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
lowerCamelCase :List[str] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCamelCase :int = {'''unk_token''': '''<unk>'''}
lowerCamelCase :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__snake_case ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__snake_case ) )
def snake_case ( self : int , **__snake_case : int ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Dict , **__snake_case : Any ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Optional[Any] , __snake_case : Union[str, Any] ):
return "lower newer", "lower newer"
@cached_property
def snake_case ( self : Any ):
return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )
@cached_property
def snake_case ( self : int ):
return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )
@require_torch
def snake_case ( self : str ):
lowerCamelCase :Any = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
lowerCamelCase :List[Any] = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[Any] = tokenizer(__snake_case , max_length=len(__snake_case ) , padding=__snake_case , return_tensors='''pt''' )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
lowerCamelCase :List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(__snake_case , __snake_case )
@require_torch
def snake_case ( self : Tuple ):
lowerCamelCase :Dict = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Union[str, Any] = tokenizer(__snake_case , padding=__snake_case , return_tensors='''pt''' )
self.assertIn('''input_ids''' , __snake_case )
self.assertIn('''attention_mask''' , __snake_case )
self.assertNotIn('''labels''' , __snake_case )
self.assertNotIn('''decoder_attention_mask''' , __snake_case )
@require_torch
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Union[str, Any] = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :List[Any] = tokenizer(text_target=__snake_case , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def snake_case ( self : List[Any] ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[Any] = tokenizer(
['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=__snake_case , truncation=__snake_case , return_tensors='''pt''' )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
def snake_case ( self : Optional[int] ):
lowerCamelCase :Union[str, Any] = ['''A long paragraph for summarization.''']
lowerCamelCase :Any = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Union[str, Any] = tokenizer(__snake_case , return_tensors='''pt''' )
lowerCamelCase :Any = tokenizer(text_target=__snake_case , return_tensors='''pt''' )
lowerCamelCase :Optional[int] = inputs['''input_ids''']
lowerCamelCase :Any = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def snake_case ( self : Dict ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[int] = ['''Summary of the text.''', '''Another summary.''']
lowerCamelCase :List[Any] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
lowerCamelCase :Optional[int] = tokenizer(__snake_case , padding=__snake_case )
lowerCamelCase :Union[str, Any] = [[0] * len(x) for x in encoded_output['''input_ids''']]
lowerCamelCase :str = tokenizer.pad(__snake_case )
self.assertSequenceEqual(outputs['''global_attention_mask'''] , __snake_case )
def snake_case ( self : Tuple ):
pass
def snake_case ( self : Optional[int] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase :Optional[Any] = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :Tuple = self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :int = '''A, <mask> AllenNLP sentence.'''
lowerCamelCase :str = tokenizer_r.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case )
lowerCamelCase :str = tokenizer_p.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case )
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
lowerCamelCase :Optional[int] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
lowerCamelCase :Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
__snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"""naver-clova-ix/donut-base""": """https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json""",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'donut-swin'
_UpperCAmelCase = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : Optional[int] , __snake_case : List[str]=224 , __snake_case : Optional[Any]=4 , __snake_case : List[Any]=3 , __snake_case : Optional[Any]=96 , __snake_case : Dict=[2, 2, 6, 2] , __snake_case : Optional[Any]=[3, 6, 12, 24] , __snake_case : str=7 , __snake_case : Dict=4.0 , __snake_case : List[str]=True , __snake_case : Optional[Any]=0.0 , __snake_case : List[Any]=0.0 , __snake_case : List[str]=0.1 , __snake_case : List[str]="gelu" , __snake_case : List[str]=False , __snake_case : int=0.0_2 , __snake_case : List[Any]=1e-5 , **__snake_case : Dict , ):
super().__init__(**__snake_case )
lowerCamelCase :Optional[Any] = image_size
lowerCamelCase :Union[str, Any] = patch_size
lowerCamelCase :Optional[Any] = num_channels
lowerCamelCase :Dict = embed_dim
lowerCamelCase :Optional[int] = depths
lowerCamelCase :List[Any] = len(__snake_case )
lowerCamelCase :Any = num_heads
lowerCamelCase :int = window_size
lowerCamelCase :Dict = mlp_ratio
lowerCamelCase :str = qkv_bias
lowerCamelCase :int = hidden_dropout_prob
lowerCamelCase :List[Any] = attention_probs_dropout_prob
lowerCamelCase :List[Any] = drop_path_rate
lowerCamelCase :Union[str, Any] = hidden_act
lowerCamelCase :str = use_absolute_embeddings
lowerCamelCase :Dict = layer_norm_eps
lowerCamelCase :Optional[int] = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCamelCase :Optional[int] = int(embed_dim * 2 ** (len(__snake_case ) - 1) )
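# Worked sketch of the hidden_size rule above: with the defaults embed_dim=96
# and depths=[2, 2, 6, 2] (four stages), hidden_size = 96 * 2 ** (4 - 1) = 768.
def _donut_swin_hidden_size(embed_dim: int = 96 , num_stages: int = 4 ) -> int:
    return int(embed_dim * 2 ** (num_stages - 1) )  # _donut_swin_hidden_size() == 768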
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A__ = {
"""configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
"""processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
"""tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""LayoutLMv2TokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""LayoutLMv2FeatureExtractor"""]
A__ = ["""LayoutLMv2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv2ForQuestionAnswering""",
"""LayoutLMv2ForSequenceClassification""",
"""LayoutLMv2ForTokenClassification""",
"""LayoutLMv2Layer""",
"""LayoutLMv2Model""",
"""LayoutLMv2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
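# A minimal sketch of the lazy-import idea behind _LazyModule (a PEP 562
# module-level __getattr__); the submodule name "heavy" is hypothetical:
#
#   def __getattr__(name):
#       if name == "heavy":
#           import importlib
#           return importlib.import_module(".heavy", __name__)
#       raise AttributeError(name)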
import unittest
import numpy as np
def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)
    if shape_a[0] != shape_b[0]:
        msg = (
            '''Expected the same number of rows for A and B. '''
            F"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)
    if shape_b[1] != shape_c[1]:
        msg = (
            '''Expected the same number of columns for B and C. '''
            F"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                '''Input matrix A is not invertible. Cannot compute Schur complement.''')
    return mat_c - mat_b.T @ a_inv @ mat_b
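# Worked sketch (not part of the original module): for M = [[A, B], [B.T, C]],
# det(M) == det(A) * det(M/A), where M/A is the Schur complement computed above.
def _schur_demo() -> None:
    a = np.array([[2.0, 0.0], [0.0, 2.0]])
    b = np.array([[1.0], [1.0]])
    c = np.array([[3.0]])
    s = schur_complement(a, b, c)  # C - B.T @ inv(A) @ B = [[2.0]]
    m = np.block([[a, b], [b.T, c]])
    assert np.isclose(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s))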
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Tuple = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowerCamelCase :List[str] = np.array([[0, 3], [3, 0], [2, 3]] )
lowerCamelCase :List[Any] = np.array([[2, 1], [6, 3]] )
lowerCamelCase :List[Any] = schur_complement(__snake_case , __snake_case , __snake_case )
lowerCamelCase :Union[str, Any] = np.block([[a, b], [b.T, c]] )
lowerCamelCase :List[Any] = np.linalg.det(__snake_case )
lowerCamelCase :Optional[int] = np.linalg.det(__snake_case )
lowerCamelCase :str = np.linalg.det(__snake_case )
self.assertAlmostEqual(__snake_case , det_a * det_s )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Dict = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowerCamelCase :Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] )
lowerCamelCase :Tuple = np.array([[2, 1], [6, 3]] )
with self.assertRaises(__snake_case ):
schur_complement(__snake_case , __snake_case , __snake_case )
def snake_case ( self : int ):
lowerCamelCase :Any = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowerCamelCase :Optional[Any] = np.array([[0, 3], [3, 0], [2, 3]] )
lowerCamelCase :Optional[Any] = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(__snake_case ):
schur_complement(__snake_case , __snake_case , __snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _lowerCAmelCase :
@staticmethod
def snake_case ( *__snake_case : str , **__snake_case : str ):
pass
@is_pipeline_test
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@require_torch
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Optional[int] = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(__snake_case ) , [
[{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}],
[{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}],
] , )
lowerCamelCase :Tuple = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
] , )
@require_tf
def snake_case ( self : Tuple ):
lowerCamelCase :Tuple = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}] , )
lowerCamelCase :int = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
] , )
@slow
@require_torch
def snake_case ( self : Any ):
lowerCamelCase :str = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
] , )
lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Union[str, Any] = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
lowerCamelCase :Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
] , )
lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
],
]
* 5 , )
def multiplicative_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError('''multiplicative_persistence() only accepts integral values''')
    if num < 0:
        raise ValueError('''multiplicative_persistence() does not accept negative values''')
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps
def additive_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError('''additive_persistence() only accepts integral values''')
    if num < 0:
        raise ValueError('''additive_persistence() does not accept negative values''')
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
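# Worked sketch: 39 -> 3*9 = 27 -> 2*7 = 14 -> 1*4 = 4, i.e. three steps;
# additively, 39 -> 3+9 = 12 -> 1+2 = 3, i.e. two steps.
def _persistence_demo() -> None:
    assert multiplicative_persistence(39) == 3
    assert additive_persistence(39) == 2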
if __name__ == "__main__":
import doctest
doctest.testmod()
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        '''^''': op.pow,
        '''*''': op.mul,
        '''/''': div,
        '''+''': op.add,
        '''-''': op.sub,
    }  # operators & their respective operation
    # print table header
    print('''Symbol'''.center(8), '''Action'''.center(12), '''Stack''', sep=''' | ''')
    print('''-''' * (30 + len(post_fix)))
    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ('''push(''' + x + ''')''').ljust(12), ''','''.join(stack), sep=''' | ''')
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print(''''''.rjust(8), ('''pop(''' + b + ''')''').ljust(12), ''','''.join(stack), sep=''' | ''')
            a = stack.pop()  # pop stack
            # output in tabular format
            print(''''''.rjust(8), ('''pop(''' + a + ''')''').ljust(12), ''','''.join(stack), sep=''' | ''')
            stack.append(
                str(opr[x](int(a), int(b))))  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8), ('''push(''' + a + x + b + ''')''').ljust(12), ''','''.join(stack), sep=''' | ''')
    return int(stack[0])
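# Worked sketch: "5 6 9 * +" is the postfix form of 5 + 6 * 9; the stack goes
# [5] -> [5, 6] -> [5, 6, 9] -> [5, 54] -> [59].
def _postfix_demo() -> None:
    assert solve('''5 6 9 * +'''.split(''' ''')) == 59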
if __name__ == "__main__":
A__ = input("""\n\nEnter a Postfix Equation (space separated) = """).split(""" """)
print("""\n\tResult = """, solve(Postfix))
def abbr(a: str, b: str) -> bool:
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
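# Worked sketch: "daBcd" abbreviates to "ABC" (capitalize 'a' and 'c', delete
# the remaining lowercase letters), while "dBcd" does not.
def _abbr_demo() -> None:
    assert abbr('''daBcd''' , '''ABC''') is True
    assert abbr('''dBcd''' , '''ABC''') is False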
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
A__ = """Hello, World!"""
A__ = """en_XX"""
def _lowerCamelCase ( a_ : str , a_ : str , a_ : bool):
lowerCamelCase :int = Path('''data_bin''')
lowerCamelCase :Union[str, Any] = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(a_).parent) , checkpoint_file=Path(a_).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(a_) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(a_).parent / '''sentencepiece.bpe.model''') , src_dict=str(data_dir / '''dict.txt''') , )
xmod.eval() # disable dropout
print(a_)
lowerCamelCase :Any = xmod.model.encoder.sentence_encoder
lowerCamelCase :List[str] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
lowerCamelCase :Dict = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our X-MOD config:''' , a_)
lowerCamelCase :List[Any] = XmodForSequenceClassification(a_) if classification_head else XmodForMaskedLM(a_)
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCamelCase :Union[str, Any] = xmod_sent_encoder.embed_tokens.weight
lowerCamelCase :Tuple = xmod_sent_encoder.embed_positions.weight
lowerCamelCase :List[str] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight) # just zero them out b/c xmod doesn't use them.
lowerCamelCase :List[Any] = xmod_sent_encoder.layernorm_embedding.weight
lowerCamelCase :Optional[int] = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers):
# Encoder: start of layer
lowerCamelCase :Union[str, Any] = model.roberta.encoder.layer[i]
lowerCamelCase :List[str] = xmod_sent_encoder.layers[i]
# self attention
lowerCamelCase :Optional[int] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size))
):
raise AssertionError('''Dimensions of self-attention weights do not match.''')
lowerCamelCase :Optional[int] = xmod_layer.self_attn.q_proj.weight
lowerCamelCase :List[str] = xmod_layer.self_attn.q_proj.bias
lowerCamelCase :str = xmod_layer.self_attn.k_proj.weight
lowerCamelCase :Optional[Any] = xmod_layer.self_attn.k_proj.bias
lowerCamelCase :Dict = xmod_layer.self_attn.v_proj.weight
lowerCamelCase :Optional[int] = xmod_layer.self_attn.v_proj.bias
# self-attention output
lowerCamelCase :Optional[int] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('''Dimensions of self-attention output weights do not match.''')
lowerCamelCase :List[Any] = xmod_layer.self_attn.out_proj.weight
lowerCamelCase :Union[str, Any] = xmod_layer.self_attn.out_proj.bias
lowerCamelCase :str = xmod_layer.self_attn_layer_norm.weight
lowerCamelCase :List[Any] = xmod_layer.self_attn_layer_norm.bias
# intermediate
lowerCamelCase :Optional[int] = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of intermediate weights do not match.''')
lowerCamelCase :int = xmod_layer.fca.weight
lowerCamelCase :Union[str, Any] = xmod_layer.fca.bias
# output
lowerCamelCase :List[str] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of feed-forward weights do not match.''')
lowerCamelCase :str = xmod_layer.fca.weight
lowerCamelCase :int = xmod_layer.fca.bias
lowerCamelCase :List[Any] = xmod_layer.final_layer_norm.weight
lowerCamelCase :List[str] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowerCamelCase :List[str] = xmod_layer.adapter_layer_norm.weight
lowerCamelCase :int = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
raise AssertionError('''Lists of language adapters do not match.''')
for lang_code, adapter in xmod_layer.adapter_modules.items():
lowerCamelCase :Optional[int] = bert_output.adapter_modules[lang_code]
lowerCamelCase :Dict = xmod_layer.adapter_modules[lang_code]
lowerCamelCase :List[Any] = from_adapter.fca.weight
lowerCamelCase :List[Any] = from_adapter.fca.bias
lowerCamelCase :Dict = from_adapter.fca.weight
lowerCamelCase :Optional[Any] = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowerCamelCase :Dict = xmod_sent_encoder.layer_norm.weight
lowerCamelCase :List[Any] = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowerCamelCase :Union[str, Any] = xmod.model.classification_heads['''mnli'''].dense.weight
lowerCamelCase :Tuple = xmod.model.classification_heads['''mnli'''].dense.bias
lowerCamelCase :Optional[Any] = xmod.model.classification_heads['''mnli'''].out_proj.weight
lowerCamelCase :List[Any] = xmod.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
lowerCamelCase :int = xmod.model.encoder.lm_head.dense.weight
lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.dense.bias
lowerCamelCase :Optional[int] = xmod.model.encoder.lm_head.layer_norm.weight
lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.layer_norm.bias
lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.weight
lowerCamelCase :Any = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCamelCase :str = xmod.encode(a_).unsqueeze(0) # batch of size 1
model.roberta.set_default_language(a_)
lowerCamelCase :Any = model(a_)[0]
if classification_head:
lowerCamelCase :Dict = xmod.model.classification_heads['''mnli'''](xmod.extract_features(a_))
else:
lowerCamelCase :int = xmod.model(a_ , lang_id=[SAMPLE_LANGUAGE])[0]
print(our_output.shape , their_output.shape)
lowerCamelCase :List[str] = torch.max(torch.abs(our_output - their_output)).item()
print(F"max_absolute_diff = {max_absolute_diff}") # ~ 1e-7
lowerCamelCase :str = torch.allclose(a_ , a_ , atol=1e-3)
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''')
if not success:
raise Exception('''Something went wRoNg''')
Path(a_).mkdir(parents=a_ , exist_ok=a_)
print(F"Saving model to {pytorch_dump_folder_path}")
model.save_pretrained(a_)
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
A__ = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
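# Hypothetical invocation of convert_xmod_checkpoint_to_pytorch through this
# script (the file name and paths below are placeholders):
#
#   python convert_xmod_checkpoint.py \
#       --xmod_checkpoint_path /path/to/checkpoint/model.pt \
#       --pytorch_dump_folder_path ./xmod-base \
#       --classification_head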
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
A__ : Tuple = logging.get_logger(__name__)
class _lowerCAmelCase :
_UpperCAmelCase = 4_2
_UpperCAmelCase = None
@staticmethod
def snake_case ( ):
raise NotImplementedError
def snake_case ( self : Tuple , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : str , **__snake_case : str ):
raise NotImplementedError
def snake_case ( self : Union[str, Any] , __snake_case : List[str] ):
raise NotImplementedError
def snake_case ( self : Optional[Any] ):
if not self.is_available():
raise RuntimeError(
F"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}." )
@classmethod
def snake_case ( cls : Optional[Any] ):
return F"`pip install {cls.pip_package or cls.name}`"
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'optuna'
@staticmethod
def snake_case ( ):
return is_optuna_available()
def snake_case ( self : Optional[int] , __snake_case : Any , __snake_case : int , __snake_case : str , **__snake_case : Dict ):
return run_hp_search_optuna(__snake_case , __snake_case , __snake_case , **__snake_case )
def snake_case ( self : List[str] , __snake_case : List[str] ):
return default_hp_space_optuna(__snake_case )
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'ray'
_UpperCAmelCase = '\'ray[tune]\''
@staticmethod
def snake_case ( ):
return is_ray_available()
def snake_case ( self : Optional[int] , __snake_case : str , __snake_case : int , __snake_case : str , **__snake_case : Optional[int] ):
return run_hp_search_ray(__snake_case , __snake_case , __snake_case , **__snake_case )
def snake_case ( self : Tuple , __snake_case : Any ):
return default_hp_space_ray(__snake_case )
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'sigopt'
@staticmethod
def snake_case ( ):
return is_sigopt_available()
def snake_case ( self : Any , __snake_case : str , __snake_case : int , __snake_case : str , **__snake_case : Optional[int] ):
return run_hp_search_sigopt(__snake_case , __snake_case , __snake_case , **__snake_case )
def snake_case ( self : Union[str, Any] , __snake_case : List[Any] ):
return default_hp_space_sigopt(__snake_case )
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'wandb'
@staticmethod
def snake_case ( ):
return is_wandb_available()
def snake_case ( self : str , __snake_case : Any , __snake_case : int , __snake_case : str , **__snake_case : List[Any] ):
return run_hp_search_wandb(__snake_case , __snake_case , __snake_case , **__snake_case )
def snake_case ( self : Optional[Any] , __snake_case : List[Any] ):
return default_hp_space_wandb(__snake_case )
A__ : str = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def _lowerCamelCase ( ):
lowerCamelCase :Union[str, Any] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(a_) > 0:
lowerCamelCase :Optional[int] = available_backends[0].name
if len(a_) > 1:
logger.info(
F"{len(a_)} hyperparameter search backends available. Using {name} as the default.")
return name
raise RuntimeError(
'''No hyperparameter search backend available.\n'''
+ '''\n'''.join(
F" - To install {backend.name} run {backend.pip_install()}"
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()))
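# Sketch of the resolution rule above in isolation: the first installed backend
# (in ALL_HYPERPARAMETER_SEARCH_BACKENDS order) wins, or None if nothing is installed.
def _pick_backend_demo():
    available = [b for b in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if b.is_available()]
    return available[0].name if available else None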
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'roberta-prelayernorm'
def __init__( self : str , __snake_case : List[str]=50265 , __snake_case : Union[str, Any]=768 , __snake_case : Tuple=12 , __snake_case : int=12 , __snake_case : Any=3072 , __snake_case : Optional[int]="gelu" , __snake_case : List[Any]=0.1 , __snake_case : int=0.1 , __snake_case : Union[str, Any]=512 , __snake_case : Dict=2 , __snake_case : int=0.0_2 , __snake_case : Any=1e-1_2 , __snake_case : Optional[int]=1 , __snake_case : Dict=0 , __snake_case : Optional[int]=2 , __snake_case : Any="absolute" , __snake_case : Union[str, Any]=True , __snake_case : List[str]=None , **__snake_case : Optional[int] , ):
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
lowerCamelCase :Optional[int] = vocab_size
lowerCamelCase :Dict = hidden_size
lowerCamelCase :Tuple = num_hidden_layers
lowerCamelCase :Optional[int] = num_attention_heads
lowerCamelCase :Any = hidden_act
lowerCamelCase :List[Any] = intermediate_size
lowerCamelCase :Union[str, Any] = hidden_dropout_prob
lowerCamelCase :str = attention_probs_dropout_prob
lowerCamelCase :Tuple = max_position_embeddings
lowerCamelCase :int = type_vocab_size
lowerCamelCase :Optional[Any] = initializer_range
lowerCamelCase :Union[str, Any] = layer_norm_eps
lowerCamelCase :Dict = position_embedding_type
lowerCamelCase :List[Any] = use_cache
lowerCamelCase :Optional[int] = classifier_dropout
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
@property
def snake_case ( self : Any ):
if self.task == "multiple-choice":
lowerCamelCase :Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCamelCase :List[str] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 49
| 0
|
def power(base: int, exponent: int) -> float:
    return base * power(base, exponent - 1) if exponent else 1
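# Worked sketch: the recursion unrolls as power(2, 3) == 2 * 2 * 2 * 1 == 8;
# a zero exponent hits the `else 1` base case directly.
def _power_demo() -> None:
    assert power(2, 3) == 8
    assert power(5, 0) == 1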
if __name__ == "__main__":
print("""Raise base to the power of exponent using recursion...""")
A__ = int(input("""Enter the base: """).strip())
A__ = int(input("""Enter the exponent: """).strip())
A__ = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
A__ = 1 / result
print(F'{base} to the power of {exponent} is {result}')
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = DebertaTokenizer
_UpperCAmelCase = True
_UpperCAmelCase = DebertaTokenizerFast
def snake_case ( self : List[str] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase :Dict = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
lowerCamelCase :List[str] = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
lowerCamelCase :Any = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCamelCase :Dict = {'''unk_token''': '''[UNK]'''}
lowerCamelCase :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__snake_case ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__snake_case ) )
def snake_case ( self : str , **__snake_case : Dict ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Optional[Any] , __snake_case : int ):
lowerCamelCase :List[Any] = '''lower newer'''
lowerCamelCase :List[str] = '''lower newer'''
return input_text, output_text
def snake_case ( self : str ):
lowerCamelCase :Optional[int] = self.get_tokenizer()
lowerCamelCase :Union[str, Any] = '''lower newer'''
lowerCamelCase :str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
lowerCamelCase :Optional[int] = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
lowerCamelCase :List[str] = tokens + [tokenizer.unk_token]
lowerCamelCase :Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def snake_case ( self : Optional[int] ):
lowerCamelCase :List[str] = self.get_tokenizer()
lowerCamelCase :Optional[int] = tokenizer('''Hello''' , '''World''' )
lowerCamelCase :List[str] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['''token_type_ids'''] , __snake_case )
@slow
def snake_case ( self : str ):
lowerCamelCase :Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
lowerCamelCase :Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__snake_case )
lowerCamelCase :Tuple = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__snake_case )
lowerCamelCase :Union[str, Any] = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
lowerCamelCase :str = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
lowerCamelCase :Any = tokenizer.build_inputs_with_special_tokens(__snake_case )
lowerCamelCase :Dict = tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def snake_case ( self : str ):
lowerCamelCase :List[str] = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
lowerCamelCase :int = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
lowerCamelCase :Tuple = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
lowerCamelCase :List[Any] = tokenizer(__snake_case , padding=__snake_case )
lowerCamelCase :Union[str, Any] = [tokenizer.decode(__snake_case , skip_special_tokens=__snake_case ) for seq in encoding['''input_ids''']]
# fmt: off
lowerCamelCase :Any = {
'''input_ids''': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
lowerCamelCase :Optional[int] = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , __snake_case )
for expected, decoded in zip(__snake_case , __snake_case ):
self.assertEqual(__snake_case , __snake_case )
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Optional[int] , __snake_case : int , __snake_case : int ):
lowerCamelCase :Dict = jnp.ones((batch_size, length) ) / length
return scores
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Union[str, Any] = None
lowerCamelCase :Dict = 20
lowerCamelCase :Any = self._get_uniform_logits(batch_size=2 , length=__snake_case )
# tweak scores to not be uniform anymore
lowerCamelCase :Dict = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCamelCase :Tuple = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCamelCase :Tuple = jax.nn.softmax(__snake_case , axis=-1 )
lowerCamelCase :List[str] = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase :Optional[int] = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCamelCase :Optional[int] = jax.nn.softmax(temp_dist_warper_sharper(__snake_case , scores.copy() , cur_len=__snake_case ) , axis=-1 )
lowerCamelCase :Any = jax.nn.softmax(temp_dist_warper_smoother(__snake_case , scores.copy() , cur_len=__snake_case ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :int = None
lowerCamelCase :Optional[int] = 10
lowerCamelCase :str = 2
# create ramp distribution
lowerCamelCase :str = np.broadcast_to(np.arange(__snake_case )[None, :] , (batch_size, vocab_size) ).copy()
lowerCamelCase :List[str] = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCamelCase :Optional[int] = FlaxTopKLogitsWarper(3 )
lowerCamelCase :int = top_k_warp(__snake_case , __snake_case , cur_len=__snake_case )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCamelCase :Union[str, Any] = 5
lowerCamelCase :List[str] = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
lowerCamelCase :Union[str, Any] = np.broadcast_to(np.arange(__snake_case )[None, :] , (batch_size, length) ).copy()
lowerCamelCase :List[Any] = top_k_warp_safety_check(__snake_case , __snake_case , cur_len=__snake_case )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def snake_case ( self : Any ):
lowerCamelCase :List[str] = None
lowerCamelCase :Any = 10
lowerCamelCase :Tuple = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCamelCase :Optional[int] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
lowerCamelCase :int = FlaxTopPLogitsWarper(0.8 )
lowerCamelCase :List[Any] = np.exp(top_p_warp(__snake_case , __snake_case , cur_len=__snake_case ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCamelCase :Any = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(__snake_case , __snake_case , atol=1e-3 ) )
# check edge cases with negative and extreme logits
lowerCamelCase :int = np.broadcast_to(np.arange(__snake_case )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCamelCase :int = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
lowerCamelCase :Dict = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
lowerCamelCase :List[Any] = top_p_warp(__snake_case , __snake_case , cur_len=__snake_case )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def snake_case ( self : Optional[int] ):
lowerCamelCase :List[Any] = 20
lowerCamelCase :Optional[int] = 4
lowerCamelCase :List[str] = 0
lowerCamelCase :int = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__snake_case )
# check that min length is applied at length 5
lowerCamelCase :int = ids_tensor((batch_size, 20) , vocab_size=20 )
lowerCamelCase :int = 5
lowerCamelCase :List[Any] = self._get_uniform_logits(__snake_case , __snake_case )
lowerCamelCase :List[Any] = min_dist_processor(__snake_case , __snake_case , cur_len=__snake_case )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
lowerCamelCase :Optional[Any] = self._get_uniform_logits(__snake_case , __snake_case )
lowerCamelCase :Any = 15
lowerCamelCase :Any = min_dist_processor(__snake_case , __snake_case , cur_len=__snake_case )
self.assertFalse(jnp.isinf(__snake_case ).any() )
def snake_case ( self : str ):
lowerCamelCase :Any = 20
lowerCamelCase :List[Any] = 4
lowerCamelCase :List[str] = 0
lowerCamelCase :Any = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__snake_case )
# check that all scores are -inf except the bos_token_id score
lowerCamelCase :Any = ids_tensor((batch_size, 1) , vocab_size=20 )
lowerCamelCase :List[str] = 1
lowerCamelCase :Dict = self._get_uniform_logits(__snake_case , __snake_case )
lowerCamelCase :int = logits_processor(__snake_case , __snake_case , cur_len=__snake_case )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCamelCase :Optional[Any] = 3
lowerCamelCase :Union[str, Any] = self._get_uniform_logits(__snake_case , __snake_case )
lowerCamelCase :int = logits_processor(__snake_case , __snake_case , cur_len=__snake_case )
self.assertFalse(jnp.isinf(__snake_case ).any() )
def snake_case ( self : Dict ):
lowerCamelCase :Optional[Any] = 20
lowerCamelCase :List[str] = 4
lowerCamelCase :Any = 0
lowerCamelCase :Union[str, Any] = 5
lowerCamelCase :Dict = FlaxForcedEOSTokenLogitsProcessor(max_length=__snake_case , eos_token_id=__snake_case )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCamelCase :int = ids_tensor((batch_size, 4) , vocab_size=20 )
lowerCamelCase :Tuple = 4
lowerCamelCase :List[str] = self._get_uniform_logits(__snake_case , __snake_case )
lowerCamelCase :List[str] = logits_processor(__snake_case , __snake_case , cur_len=__snake_case )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCamelCase :Optional[int] = 3
lowerCamelCase :str = self._get_uniform_logits(__snake_case , __snake_case )
lowerCamelCase :str = logits_processor(__snake_case , __snake_case , cur_len=__snake_case )
self.assertFalse(jnp.isinf(__snake_case ).any() )
def snake_case ( self : str ):
lowerCamelCase :List[Any] = 4
lowerCamelCase :Dict = 10
lowerCamelCase :int = 15
lowerCamelCase :Any = 2
lowerCamelCase :Dict = 1
lowerCamelCase :Union[str, Any] = 15
# dummy input_ids and scores
lowerCamelCase :int = ids_tensor((batch_size, sequence_length) , __snake_case )
lowerCamelCase :int = input_ids.copy()
lowerCamelCase :Tuple = self._get_uniform_logits(__snake_case , __snake_case )
lowerCamelCase :str = scores.copy()
# instantiate all dist processors
lowerCamelCase :List[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase :str = FlaxTopKLogitsWarper(3 )
lowerCamelCase :Optional[int] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase :int = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__snake_case )
lowerCamelCase :Any = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__snake_case )
lowerCamelCase :Any = FlaxForcedEOSTokenLogitsProcessor(max_length=__snake_case , eos_token_id=__snake_case )
lowerCamelCase :Tuple = 10
# no processor list
lowerCamelCase :Dict = temp_dist_warp(__snake_case , __snake_case , cur_len=__snake_case )
lowerCamelCase :Tuple = top_k_warp(__snake_case , __snake_case , cur_len=__snake_case )
lowerCamelCase :Optional[Any] = top_p_warp(__snake_case , __snake_case , cur_len=__snake_case )
lowerCamelCase :Dict = min_dist_proc(__snake_case , __snake_case , cur_len=__snake_case )
lowerCamelCase :int = bos_dist_proc(__snake_case , __snake_case , cur_len=__snake_case )
lowerCamelCase :Union[str, Any] = eos_dist_proc(__snake_case , __snake_case , cur_len=__snake_case )
# with processor list
lowerCamelCase :Dict = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase :int = processor(__snake_case , __snake_case , cur_len=__snake_case )
# scores should be equal
self.assertTrue(jnp.allclose(__snake_case , __snake_case , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Optional[int] = 4
lowerCamelCase :Optional[int] = 10
lowerCamelCase :int = 15
lowerCamelCase :List[str] = 2
lowerCamelCase :Optional[int] = 1
lowerCamelCase :Tuple = 15
# dummy input_ids and scores
lowerCamelCase :Tuple = ids_tensor((batch_size, sequence_length) , __snake_case )
lowerCamelCase :List[str] = input_ids.copy()
lowerCamelCase :List[str] = self._get_uniform_logits(__snake_case , __snake_case )
lowerCamelCase :Dict = scores.copy()
# instantiate all dist processors
lowerCamelCase :List[str] = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase :Union[str, Any] = FlaxTopKLogitsWarper(3 )
lowerCamelCase :Dict = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase :Optional[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__snake_case )
lowerCamelCase :Dict = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__snake_case )
lowerCamelCase :int = FlaxForcedEOSTokenLogitsProcessor(max_length=__snake_case , eos_token_id=__snake_case )
lowerCamelCase :Any = 10
# no processor list
def run_no_processor_list(__snake_case : int , __snake_case : Any , __snake_case : Optional[Any] ):
lowerCamelCase :Dict = temp_dist_warp(__snake_case , __snake_case , cur_len=__snake_case )
lowerCamelCase :int = top_k_warp(__snake_case , __snake_case , cur_len=__snake_case )
lowerCamelCase :str = top_p_warp(__snake_case , __snake_case , cur_len=__snake_case )
lowerCamelCase :List[Any] = min_dist_proc(__snake_case , __snake_case , cur_len=__snake_case )
lowerCamelCase :Dict = bos_dist_proc(__snake_case , __snake_case , cur_len=__snake_case )
lowerCamelCase :Optional[int] = eos_dist_proc(__snake_case , __snake_case , cur_len=__snake_case )
return scores
# with processor list
def run_processor_list(__snake_case : Union[str, Any] , __snake_case : int , __snake_case : Tuple ):
lowerCamelCase :Union[str, Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase :List[str] = processor(__snake_case , __snake_case , cur_len=__snake_case )
return scores
lowerCamelCase :Optional[Any] = jax.jit(__snake_case )
lowerCamelCase :Tuple = jax.jit(__snake_case )
lowerCamelCase :List[str] = jitted_run_no_processor_list(__snake_case , __snake_case , __snake_case )
lowerCamelCase :int = jitted_run_processor_list(__snake_case , __snake_case , __snake_case )
# scores should be equal
self.assertTrue(jnp.allclose(__snake_case , __snake_case , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
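# --- added hedged sketch (not part of the test suite above; names are mine) ---
# The top-k tests assume warping keeps the k largest logits per row and replaces the
# rest with a filter value (-inf by default). A minimal NumPy sketch of that semantics:
import numpy as np
def topk_filter_sketch(logits, k, filter_value=-np.inf):
    # k-th largest value per row; anything strictly below it is filtered out
    kth_largest = np.sort(logits, axis=-1)[:, -k][:, None]
    return np.where(logits < kth_largest, filter_value, logits)
# with k=3 only the 3 largest entries of a ramp survive, matching the isinf checks above
print(topk_filter_sketch(np.arange(10, dtype=float)[None, :], k=3))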
| 701
|
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
A__ = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Any , __snake_case : str , __snake_case : bool , __snake_case : str = None , __snake_case : list = None ):
lowerCamelCase :Tuple = None
lowerCamelCase :Tuple = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
lowerCamelCase :Optional[int] = os.path.abspath('''examples''' )
for item in os.listdir(__snake_case ):
if item not in EXCLUDE_EXAMPLES:
lowerCamelCase :Optional[int] = os.path.join(__snake_case , __snake_case )
if os.path.isfile(__snake_case ) and ".py" in item_path:
with self.subTest(
tested_script=__snake_case , feature_script=__snake_case , tested_section='''main()''' if parser_only else '''training_function()''' , ):
lowerCamelCase :Union[str, Any] = compare_against_test(
os.path.join(__snake_case , __snake_case ) , __snake_case , __snake_case , __snake_case )
lowerCamelCase :int = '''\n'''.join(__snake_case )
if special_strings is not None:
for string in special_strings:
lowerCamelCase :int = diff.replace(__snake_case , '''''' )
self.assertEqual(__snake_case , '''''' )
def snake_case ( self : Dict ):
self.one_complete_example('''complete_nlp_example.py''' , __snake_case )
self.one_complete_example('''complete_nlp_example.py''' , __snake_case )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Union[str, Any] = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
lowerCamelCase :Optional[int] = [
''' ''' * 16 + '''{\n\n''',
''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 20 + '''"epoch": epoch,\n\n''',
''' ''' * 16 + '''},\n\n''',
''' ''' * 16 + '''step=epoch,\n''',
''' ''' * 12,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , __snake_case , __snake_case , __snake_case )
self.one_complete_example('''complete_cv_example.py''' , __snake_case , __snake_case , __snake_case )
@mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} )
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = False
@classmethod
def snake_case ( cls : Optional[Any] ):
super().setUpClass()
lowerCamelCase :Any = tempfile.mkdtemp()
lowerCamelCase :Optional[int] = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
lowerCamelCase :List[str] = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def snake_case ( cls : Dict ):
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def snake_case ( self : int ):
lowerCamelCase :Any = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def snake_case ( self : List[Any] ):
lowerCamelCase :Tuple = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n ".split()
lowerCamelCase :List[Any] = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def snake_case ( self : List[str] ):
lowerCamelCase :Dict = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}\n ".split()
lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case )
self.assertNotIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
def snake_case ( self : str ):
lowerCamelCase :List[Any] = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}\n ".split()
lowerCamelCase :Optional[int] = run_command(self._launch_args + testargs , return_stdout=__snake_case )
if torch.cuda.is_available():
lowerCamelCase :Union[str, Any] = torch.cuda.device_count()
else:
lowerCamelCase :Dict = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
else:
self.assertIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
@slow
def snake_case ( self : Any ):
lowerCamelCase :Tuple = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case )
lowerCamelCase :Tuple = re.findall('''({.+})''' , __snake_case )
lowerCamelCase :Optional[Any] = [r for r in results if '''accuracy''' in r][-1]
lowerCamelCase :List[str] = ast.literal_eval(__snake_case )
self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 )
def snake_case ( self : int ):
lowerCamelCase :Dict = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case ( self : Any ):
with tempfile.TemporaryDirectory() as tmpdir:
lowerCamelCase :Tuple = F"\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__snake_case , '''tracking''' ) ) )
def snake_case ( self : Tuple ):
lowerCamelCase :Tuple = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :int = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
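# --- added hedged sketch (illustrative only; the paths are assumptions) ---
# Every test above shells out through `run_command(self._launch_args + testargs)`,
# which boils down to an `accelerate launch` invocation such as:
import subprocess
subprocess.run(
    ["accelerate", "launch", "--config_file", "/tmp/default_config.yml",  # assumed path
     "examples/by_feature/checkpointing.py", "--checkpointing_steps", "epoch",
     "--output_dir", "/tmp/out"],  # assumed output dir
    check=True,
)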
| 49
| 0
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
A__ = imread(R"""digital_image_processing/image_data/lena_small.jpg""")
A__ = cvtColor(img, COLOR_BGR2GRAY)
def _lowerCamelCase ( ):
lowerCamelCase :int = cn.convert_to_negative(a_)
# assert negative_img array for at least one True
assert negative_img.any()
def _lowerCamelCase ( ):
with Image.open('''digital_image_processing/image_data/lena_small.jpg''') as img:
# Work around assertion for response
assert str(cc.change_contrast(a_ , 1_10)).startswith(
'''<PIL.Image.Image image mode=RGB size=100x100 at''')
def _lowerCamelCase ( ):
lowerCamelCase :Optional[Any] = canny.gen_gaussian_kernel(9 , sigma=1.4)
    # assert the generated Gaussian kernel contains only nonzero values
assert resp.all()
def _lowerCamelCase ( ):
lowerCamelCase :str = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0)
    # assert every pixel of the grayscale image is nonzero
assert canny_img.all()
lowerCamelCase :Optional[Any] = canny.canny(a_)
# assert canny array for at least one True
assert canny_array.any()
def _lowerCamelCase ( ):
assert gg.gaussian_filter(a_ , 5 , sigma=0.9).all()
def _lowerCamelCase ( ):
# laplace diagonals
lowerCamelCase :List[Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
lowerCamelCase :List[Any] = conv.img_convolve(a_ , a_).astype(a_)
assert res.any()
def _lowerCamelCase ( ):
assert med.median_filter(a_ , 3).any()
def _lowerCamelCase ( ):
lowerCamelCase :Union[str, Any] = sob.sobel_filter(a_)
assert grad.any() and theta.any()
def _lowerCamelCase ( ):
lowerCamelCase :Dict = sp.make_sepia(a_ , 20)
assert sepia.all()
def _lowerCamelCase ( a_ : str = "digital_image_processing/image_data/lena_small.jpg"):
lowerCamelCase :Any = bs.Burkes(imread(a_ , 1) , 1_20)
burkes.process()
assert burkes.output_img.any()
def _lowerCamelCase ( a_ : str = "digital_image_processing/image_data/lena_small.jpg" , ):
lowerCamelCase :Tuple = rs.NearestNeighbour(imread(a_ , 1) , 4_00 , 2_00)
nn.process()
assert nn.output.any()
def _lowerCamelCase ( ):
lowerCamelCase :Tuple = '''digital_image_processing/image_data/lena.jpg'''
# Reading the image and converting it to grayscale.
lowerCamelCase :Tuple = imread(a_ , 0)
    # Test that get_neighbors_pixel() does not return None
lowerCamelCase :Dict = 0
lowerCamelCase :Optional[Any] = 0
lowerCamelCase :str = image[x_coordinate][y_coordinate]
lowerCamelCase :Any = lbp.get_neighbors_pixel(
a_ , a_ , a_ , a_)
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
lowerCamelCase :int = np.zeros((image.shape[0], image.shape[1]))
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0]):
for j in range(0 , image.shape[1]):
lowerCamelCase :Optional[int] = lbp.local_binary_value(a_ , a_ , a_)
assert lbp_image.any()
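# --- added hedged sketch (names and bit ordering are my assumptions) ---
# The nested loop above assumes an 8-neighbour local binary pattern: each neighbour
# at least as bright as the centre pixel sets one bit of the pattern value.
def lbp_value_sketch(img, x, y):
    center = img[x][y]
    offsets = [(-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1)]
    value = 0
    for bit, (dx, dy) in enumerate(offsets):
        if img[x + dx][y + dy] >= center:
            value += 1 << bit
    return value
print(lbp_value_sketch([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 1, 1))  # -> 120: bits set where neighbour >= 5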
| 702
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
A__ = imread(R"""digital_image_processing/image_data/lena_small.jpg""")
A__ = cvtColor(img, COLOR_BGR2GRAY)
def _lowerCamelCase ( ):
lowerCamelCase :int = cn.convert_to_negative(a_)
# assert negative_img array for at least one True
assert negative_img.any()
def _lowerCamelCase ( ):
with Image.open('''digital_image_processing/image_data/lena_small.jpg''') as img:
# Work around assertion for response
assert str(cc.change_contrast(a_ , 1_10)).startswith(
'''<PIL.Image.Image image mode=RGB size=100x100 at''')
def _lowerCamelCase ( ):
lowerCamelCase :Optional[Any] = canny.gen_gaussian_kernel(9 , sigma=1.4)
    # assert the generated Gaussian kernel contains only nonzero values
assert resp.all()
def _lowerCamelCase ( ):
lowerCamelCase :str = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0)
    # assert every pixel of the grayscale image is nonzero
assert canny_img.all()
lowerCamelCase :Optional[Any] = canny.canny(a_)
# assert canny array for at least one True
assert canny_array.any()
def _lowerCamelCase ( ):
assert gg.gaussian_filter(a_ , 5 , sigma=0.9).all()
def _lowerCamelCase ( ):
# laplace diagonals
lowerCamelCase :List[Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
lowerCamelCase :List[Any] = conv.img_convolve(a_ , a_).astype(a_)
assert res.any()
def _lowerCamelCase ( ):
assert med.median_filter(a_ , 3).any()
def _lowerCamelCase ( ):
lowerCamelCase , lowerCamelCase :Union[str, Any] = sob.sobel_filter(a_)
assert grad.any() and theta.any()
def _lowerCamelCase ( ):
lowerCamelCase :Dict = sp.make_sepia(a_ , 20)
assert sepia.all()
def _lowerCamelCase ( a_ : str = "digital_image_processing/image_data/lena_small.jpg"):
lowerCamelCase :Any = bs.Burkes(imread(a_ , 1) , 1_20)
burkes.process()
assert burkes.output_img.any()
def _lowerCamelCase ( a_ : str = "digital_image_processing/image_data/lena_small.jpg" , ):
lowerCamelCase :Tuple = rs.NearestNeighbour(imread(a_ , 1) , 4_00 , 2_00)
nn.process()
assert nn.output.any()
def _lowerCamelCase ( ):
lowerCamelCase :Tuple = '''digital_image_processing/image_data/lena.jpg'''
# Reading the image and converting it to grayscale.
lowerCamelCase :Tuple = imread(a_ , 0)
    # Test that get_neighbors_pixel() does not return None
lowerCamelCase :Dict = 0
lowerCamelCase :Optional[Any] = 0
lowerCamelCase :str = image[x_coordinate][y_coordinate]
lowerCamelCase :Any = lbp.get_neighbors_pixel(
a_ , a_ , a_ , a_)
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
lowerCamelCase :int = np.zeros((image.shape[0], image.shape[1]))
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0]):
for j in range(0 , image.shape[1]):
lowerCamelCase :Optional[int] = lbp.local_binary_value(a_ , a_ , a_)
assert lbp_image.any()
| 49
| 0
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.6, 'eval_loss': 0.9},
},
{
'framework': 'tensorflow',
'script': 'run_tf.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 0.9},
},
] )
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Any ):
if self.framework == "pytorch":
subprocess.run(
F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding='''utf-8''' , check=__snake_case , )
assert hasattr(self , '''env''' )
def snake_case ( self : List[str] , __snake_case : List[str]=1 ):
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"{self.env.base_job_name}-single" , instance_count=__snake_case , instance_type=self.instance_type , debugger_hook_config=__snake_case , hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='''py36''' , )
def snake_case ( self : str , __snake_case : Tuple ):
TrainingJobAnalytics(__snake_case ).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv" )
def snake_case ( self : Union[str, Any] ):
# create estimator
lowerCamelCase :List[str] = self.create_estimator()
# run training
estimator.fit()
# result dataframe
lowerCamelCase :Any = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCamelCase :Tuple = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
lowerCamelCase :int = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowerCamelCase :Tuple = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"{estimator.latest_training_job.name}.json" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , __snake_case )
| 703
|
import os
from math import logaa
def _lowerCamelCase ( a_ : str = "base_exp.txt"):
lowerCamelCase :float = 0
lowerCamelCase :Optional[int] = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(a_) , a_))):
lowerCamelCase , lowerCamelCase :Optional[int] = list(map(a_ , line.split(''',''')))
if x * logaa(a_) > largest:
lowerCamelCase :List[Any] = x * logaa(a_)
lowerCamelCase :Any = i + 1
return result
if __name__ == "__main__":
print(solution())
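# --- added hedged note (sample rows are illustrative, kept small on purpose) ---
# Why the logarithm works: base**exp is monotone in exp * log10(base), so rows can be
# compared without evaluating the enormous powers themselves.
from math import log10
pairs = [(2, 11), (3, 7)]  # 2**11 = 2048 < 3**7 = 2187
print(max(range(len(pairs)), key=lambda i: pairs[i][1] * log10(pairs[i][0])) + 1)  # -> 2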
| 49
| 0
|
'''simple docstring'''
def _lowerCamelCase ( a_ : list):
if not grid or not grid[0]:
raise TypeError('''The grid does not contain the appropriate information''')
for cell_n in range(1 , len(grid[0])):
grid[0][cell_n] += grid[0][cell_n - 1]
lowerCamelCase :Any = grid[0]
for row_n in range(1 , len(a_)):
lowerCamelCase :List[str] = grid[row_n]
lowerCamelCase :Union[str, Any] = fill_row(a_ , a_)
lowerCamelCase :List[Any] = grid[row_n]
return grid[-1][-1]
def _lowerCamelCase ( a_ : list , a_ : list):
current_row[0] += row_above[0]
for cell_n in range(1 , len(a_)):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n])
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
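# --- added hedged usage sketch (self-contained; names are mine, since the obfuscated
# definitions above shadow each other) ---
# The same dynamic programme, row by row: each cell adds the cheaper of the cell above
# or the cell to its left, so the bottom-right cell ends up holding the minimum path sum.
def min_path_sum_sketch(grid):
    for c in range(1, len(grid[0])):
        grid[0][c] += grid[0][c - 1]
    for r in range(1, len(grid)):
        grid[r][0] += grid[r - 1][0]
        for c in range(1, len(grid[r])):
            grid[r][c] += min(grid[r - 1][c], grid[r][c - 1])
    return grid[-1][-1]
print(min_path_sum_sketch([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))  # -> 7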
| 704
|
def _lowerCamelCase ( a_ : list):
if not grid or not grid[0]:
raise TypeError('''The grid does not contain the appropriate information''')
for cell_n in range(1 , len(grid[0])):
grid[0][cell_n] += grid[0][cell_n - 1]
lowerCamelCase :Any = grid[0]
for row_n in range(1 , len(a_)):
lowerCamelCase :List[str] = grid[row_n]
lowerCamelCase :Union[str, Any] = fill_row(a_ , a_)
lowerCamelCase :List[Any] = grid[row_n]
return grid[-1][-1]
def _lowerCamelCase ( a_ : list , a_ : list):
current_row[0] += row_above[0]
for cell_n in range(1 , len(a_)):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n])
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
| 0
|
from __future__ import annotations
class _lowerCAmelCase :
def __init__( self : Dict , __snake_case : int ):
lowerCamelCase :Tuple = order
# a_{0} ... a_{k}
lowerCamelCase :Dict = [1.0] + [0.0] * order
# b_{0} ... b_{k}
lowerCamelCase :Optional[int] = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
lowerCamelCase :Union[str, Any] = [0.0] * self.order
# y[n-1] ... y[n-k]
lowerCamelCase :Any = [0.0] * self.order
def snake_case ( self : List[str] , __snake_case : list[float] , __snake_case : list[float] ):
if len(__snake_case ) < self.order:
lowerCamelCase :int = [1.0, *a_coeffs]
if len(__snake_case ) != self.order + 1:
lowerCamelCase :str = (
F"Expected a_coeffs to have {self.order + 1} elements "
F"for {self.order}-order filter, got {len(__snake_case )}"
)
raise ValueError(__snake_case )
if len(__snake_case ) != self.order + 1:
lowerCamelCase :str = (
F"Expected b_coeffs to have {self.order + 1} elements "
F"for {self.order}-order filter, got {len(__snake_case )}"
)
raise ValueError(__snake_case )
lowerCamelCase :Optional[int] = a_coeffs
lowerCamelCase :Tuple = b_coeffs
def snake_case ( self : Tuple , __snake_case : float ):
lowerCamelCase :int = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
lowerCamelCase :Union[str, Any] = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
lowerCamelCase :Optional[Any] = self.input_history[:-1]
lowerCamelCase :Union[str, Any] = self.output_history[:-1]
lowerCamelCase :int = sample
lowerCamelCase :int = result
return result
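# --- added hedged usage sketch (stand-alone, since the obfuscated method names above
# collide; the coefficient choice is illustrative) ---
# The class implements the direct-form IIR difference equation; for a first-order
# filter that is y[n] = (b0*x[n] + b1*x[n-1] - a1*y[n-1]) / a0.
def iir_first_order(samples, a_coeffs, b_coeffs):
    a0, a1 = a_coeffs
    b0, b1 = b_coeffs
    x_prev = y_prev = 0.0
    out = []
    for x in samples:
        y = (b0 * x + b1 * x_prev - a1 * y_prev) / a0
        x_prev, y_prev = x, y
        out.append(y)
    return out
# a two-tap averaging filter: the step input settles right after the first sample
print(iir_first_order([1.0, 1.0, 1.0, 1.0], a_coeffs=[1.0, 0.0], b_coeffs=[0.5, 0.5]))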
| 705
|
import math
def _lowerCamelCase ( a_ : int):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(a_) + 1) , 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _lowerCamelCase ( a_ : float = 0.1):
lowerCamelCase :Dict = 3
lowerCamelCase :List[Any] = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1):
primes += is_prime(a_)
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
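# --- added hedged cross-check (function name is mine) ---
# The inner range() above walks the three non-square corners of the spiral layer with
# side length j + 2; for odd side n the four corners are n*n - k*(n - 1), k = 0..3.
def spiral_corners(n):  # n odd, n >= 3
    return [n * n - k * (n - 1) for k in range(4)]
print(spiral_corners(3))  # [9, 7, 5, 3]
print(spiral_corners(5))  # [25, 21, 17, 13]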
| 49
| 0
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
A__ = logging.get_logger(__name__)
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self : List[Any] , *__snake_case : Optional[int] , **__snake_case : Optional[Any] ):
warnings.warn(
'''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use BeitImageProcessor instead.''' , __snake_case , )
super().__init__(*__snake_case , **__snake_case )
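# --- added hedged migration sketch (checkpoint name is illustrative) ---
# The warning above points at the drop-in replacement:
#     from transformers import BeitImageProcessor
#     processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")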
| 706
|
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : str ):
lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :Optional[Any] = -1
lowerCamelCase :List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :str = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase :str = TextStreamer(__snake_case )
model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase :Optional[int] = cs.out[:-1]
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : Dict ):
lowerCamelCase :Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :int = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :List[Any] = -1
lowerCamelCase :Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :List[Any] = tokenizer.decode(greedy_ids[0] )
lowerCamelCase :List[str] = TextIteratorStreamer(__snake_case )
lowerCamelCase :List[str] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case )
thread.start()
lowerCamelCase :Any = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : str ):
lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :List[str] = -1
lowerCamelCase :Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Optional[Any] = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :List[str] = greedy_ids[:, input_ids.shape[1] :]
lowerCamelCase :Union[str, Any] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase :List[str] = TextStreamer(__snake_case , skip_prompt=__snake_case )
model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase :int = cs.out[:-1]
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : Optional[int] ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowerCamelCase :List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' )
lowerCamelCase :Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__snake_case )
lowerCamelCase :Optional[int] = -1
lowerCamelCase :Union[str, Any] = torch.ones((1, 5) , device=__snake_case ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowerCamelCase :Dict = TextStreamer(__snake_case , skip_special_tokens=__snake_case )
model.generate(__snake_case , max_new_tokens=1 , do_sample=__snake_case , streamer=__snake_case )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowerCamelCase :Tuple = cs.out[:-1] # Remove the final "\n"
lowerCamelCase :int = tokenizer(__snake_case , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def snake_case ( self : List[Any] ):
lowerCamelCase :List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :Optional[int] = -1
lowerCamelCase :Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :List[Any] = TextIteratorStreamer(__snake_case , timeout=0.0_0_1 )
lowerCamelCase :Dict = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__snake_case ):
lowerCamelCase :Dict = ''''''
for new_text in streamer:
streamer_text += new_text
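# --- added hedged sketch of the producer/consumer pattern exercised above ---
# generate() runs on a background thread while the main thread drains the streamer;
# the checkpoint is the same tiny test model used in the tests.
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tokenizer("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer)
Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer}).start()
print("".join(piece for piece in streamer))  # text arrives incrementally as it is generated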
| 49
| 0
|
from collections import deque
class _lowerCAmelCase :
def __init__( self : Union[str, Any] , __snake_case : str , __snake_case : int , __snake_case : int ):
lowerCamelCase :List[Any] = process_name # process name
lowerCamelCase :int = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
lowerCamelCase :int = arrival_time
lowerCamelCase :int = burst_time # remaining burst time
lowerCamelCase :Tuple = 0 # total time of the process wait in ready queue
lowerCamelCase :Any = 0 # time from arrival time to completion time
class _lowerCAmelCase :
def __init__( self : List[str] , __snake_case : int , __snake_case : list[int] , __snake_case : deque[Process] , __snake_case : int , ):
        # total number of the MLFQ's queues
lowerCamelCase :List[Any] = number_of_queues
        # time slices of the queues to which the round-robin algorithm is applied
lowerCamelCase :int = time_slices
# unfinished process is in this ready_queue
lowerCamelCase :str = queue
# current time
lowerCamelCase :Optional[Any] = current_time
# finished process is in this sequence queue
lowerCamelCase :deque[Process] = deque()
def snake_case ( self : str ):
lowerCamelCase :Union[str, Any] = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def snake_case ( self : List[str] , __snake_case : list[Process] ):
lowerCamelCase :str = []
for i in range(len(__snake_case ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def snake_case ( self : Union[str, Any] , __snake_case : list[Process] ):
lowerCamelCase :Optional[Any] = []
for i in range(len(__snake_case ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def snake_case ( self : Any , __snake_case : list[Process] ):
lowerCamelCase :List[str] = []
for i in range(len(__snake_case ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def snake_case ( self : int , __snake_case : deque[Process] ):
return [q.burst_time for q in queue]
def snake_case ( self : Optional[Any] , __snake_case : Process ):
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def snake_case ( self : List[str] , __snake_case : deque[Process] ):
lowerCamelCase :deque[Process] = deque() # sequence deque of finished process
while len(__snake_case ) != 0:
lowerCamelCase :Tuple = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(__snake_case )
# update current time
self.current_time += cp.burst_time
            # finish the process and set the process's burst time to 0
lowerCamelCase :Dict = 0
# set the process's turnaround time because it is finished
lowerCamelCase :Any = self.current_time - cp.arrival_time
# set the completion time
lowerCamelCase :Dict = self.current_time
# add the process to queue that has finished queue
finished.append(__snake_case )
self.finish_queue.extend(__snake_case ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def snake_case ( self : Tuple , __snake_case : deque[Process] , __snake_case : int ):
lowerCamelCase :deque[Process] = deque() # sequence deque of terminated process
        # run just one cycle; unfinished processes go back into the queue
for _ in range(len(__snake_case ) ):
lowerCamelCase :List[str] = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(__snake_case )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
lowerCamelCase :List[Any] = self.current_time
                # put the process at the back of the queue because it is not finished
ready_queue.append(__snake_case )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
lowerCamelCase :Optional[Any] = 0
# set the finish time
lowerCamelCase :Optional[int] = self.current_time
# update the process' turnaround time because it is finished
lowerCamelCase :Tuple = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(__snake_case )
self.finish_queue.extend(__snake_case ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def snake_case ( self : str ):
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1 ):
            lowerCamelCase , lowerCamelCase :Union[str, Any] = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
A__ = Process("""P1""", 0, 53)
A__ = Process("""P2""", 0, 17)
A__ = Process("""P3""", 0, 68)
A__ = Process("""P4""", 0, 24)
A__ = 3
A__ = [17, 25]
A__ = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"""queue""": deque([Pa, Pa, Pa, Pa])})
A__ = Process("""P1""", 0, 53)
A__ = Process("""P2""", 0, 17)
A__ = Process("""P3""", 0, 68)
A__ = Process("""P4""", 0, 24)
A__ = 3
A__ = [17, 25]
A__ = deque([Pa, Pa, Pa, Pa])
A__ = MLFQ(number_of_queues, time_slices, queue, 0)
A__ = mlfq.multi_level_feedback_queue()
    # print total waiting times of processes (P1, P2, P3, P4)
print(
F'waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
    # print completion times of processes (P1, P2, P3, P4)
print(
F'completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
    # print total turnaround times of processes (P1, P2, P3, P4)
print(
F'turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print sequence of finished processes
print(
F'sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'
)
| 707
|
from maths.prime_factors import prime_factors
def _lowerCamelCase ( a_ : int):
if not isinstance(a_ , a_):
lowerCamelCase :Tuple = F"Input value of [number={number}] must be an integer"
raise TypeError(a_)
if number < 1:
raise ValueError('''Input must be a positive integer''')
return -1 if len(prime_factors(a_)) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
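# --- added hedged parity check (assumes prime_factors returns factors with multiplicity) ---
# The sign rule above maps an even count of prime factors to +1 and an odd count to -1.
def parity_sign(factors):
    return -1 if len(factors) % 2 else 1
print(parity_sign([2, 3]))     # 6  -> +1
print(parity_sign([2, 2, 3]))  # 12 -> -1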
| 49
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
def __init__( self : Optional[int] , __snake_case : Tuple , __snake_case : Any=7 , __snake_case : List[str]=3 , __snake_case : int=10 , __snake_case : Tuple=18 , __snake_case : List[Any]=30 , __snake_case : Optional[int]=400 , __snake_case : str=True , __snake_case : List[str]=None , __snake_case : Tuple=True , __snake_case : Union[str, Any]=[0.5, 0.5, 0.5] , __snake_case : Optional[int]=[0.5, 0.5, 0.5] , __snake_case : Optional[int]=None , ):
lowerCamelCase :str = size if size is not None else {'''shortest_edge''': 18}
lowerCamelCase :Tuple = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
lowerCamelCase :int = parent
lowerCamelCase :Union[str, Any] = batch_size
lowerCamelCase :Union[str, Any] = num_channels
lowerCamelCase :Optional[int] = num_frames
lowerCamelCase :List[str] = image_size
lowerCamelCase :Tuple = min_resolution
lowerCamelCase :Optional[int] = max_resolution
lowerCamelCase :Optional[Any] = do_resize
lowerCamelCase :Optional[int] = size
lowerCamelCase :Optional[Any] = do_normalize
lowerCamelCase :str = image_mean
lowerCamelCase :List[str] = image_std
lowerCamelCase :Tuple = crop_size
def snake_case ( self : List[str] ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = VivitImageProcessor if is_vision_available() else None
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Tuple = VivitImageProcessingTester(self )
@property
def snake_case ( self : List[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case , '''image_mean''' ) )
self.assertTrue(hasattr(__snake_case , '''image_std''' ) )
self.assertTrue(hasattr(__snake_case , '''do_normalize''' ) )
self.assertTrue(hasattr(__snake_case , '''do_resize''' ) )
self.assertTrue(hasattr(__snake_case , '''do_center_crop''' ) )
self.assertTrue(hasattr(__snake_case , '''size''' ) )
def snake_case ( self : List[str] ):
lowerCamelCase :Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
lowerCamelCase :Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def snake_case ( self : List[str] ):
# Initialize image_processing
lowerCamelCase :str = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
lowerCamelCase :Tuple = prepare_video_inputs(self.image_processor_tester , equal_resolution=__snake_case )
for video in video_inputs:
self.assertIsInstance(__snake_case , __snake_case )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
lowerCamelCase :List[str] = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowerCamelCase :Tuple = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def snake_case ( self : Dict ):
# Initialize image_processing
lowerCamelCase :List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase :str = prepare_video_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case )
for video in video_inputs:
self.assertIsInstance(__snake_case , __snake_case )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
lowerCamelCase :List[Any] = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowerCamelCase :int = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def snake_case ( self : Tuple ):
# Initialize image_processing
lowerCamelCase :Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase :List[Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case )
for video in video_inputs:
self.assertIsInstance(__snake_case , __snake_case )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
lowerCamelCase :Dict = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowerCamelCase :Tuple = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
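# --- added hedged note on the shape convention asserted throughout these tests ---
# Video pixel_values are 5-D: (batch, num_frames, num_channels, crop_height, crop_width);
# with the tester defaults above that is (1, 10, 3, 18, 18) for the unbatched case.
import torch
print(torch.zeros(1, 10, 3, 18, 18).shape)  # torch.Size([1, 10, 3, 18, 18])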
| 708
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
def _lowerCamelCase ( a_ : str , a_ : str=False):
lowerCamelCase :Optional[int] = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token'''))
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings'''))
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''))
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''))
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias'''))
for stage_idx in range(len(config.backbone_config.depths)):
for layer_idx in range(config.backbone_config.depths[stage_idx]):
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias"))
# transformer encoder
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight"))
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias"))
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight"))
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias"))
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight"))
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias"))
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
])
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCamelCase :List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
])
# fmt: on
return rename_keys
def _lowerCamelCase ( a_ : Any , a_ : Any , a_ : int=False):
for i in range(config.num_hidden_layers):
if base_model:
lowerCamelCase :Union[str, Any] = ''''''
else:
lowerCamelCase :Optional[int] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase :Optional[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.weight")
lowerCamelCase :Any = state_dict.pop(F"blocks.{i}.attn.qkv.bias")
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase :Any = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase :Tuple = in_proj_bias[: config.hidden_size]
lowerCamelCase :int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase :Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase :Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase :List[Any] = in_proj_bias[-config.hidden_size :]
def _lowerCamelCase ( a_ : int):
lowerCamelCase :Any = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(a_ , a_)
def _lowerCamelCase ( a_ : int , a_ : Any , a_ : Tuple):
lowerCamelCase :Optional[Any] = dct.pop(a_)
lowerCamelCase :str = val
def _lowerCamelCase ( ):
lowerCamelCase :Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase :Tuple = Image.open(requests.get(a_ , stream=a_).raw)
return im
@torch.no_grad()
def _lowerCamelCase ( a_ : Optional[Any] , a_ : Optional[Any] , a_ : Optional[Any]=False):
lowerCamelCase :Optional[int] = BitConfig(
global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=a_ , )
lowerCamelCase :Optional[int] = ViTHybridConfig(backbone_config=a_ , image_size=3_84 , num_labels=10_00)
lowerCamelCase :List[Any] = False
# load original model from timm
lowerCamelCase :List[str] = timm.create_model(a_ , pretrained=a_)
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCamelCase :List[str] = timm_model.state_dict()
if base_model:
remove_classification_head_(a_)
lowerCamelCase :Tuple = create_rename_keys(a_ , a_)
for src, dest in rename_keys:
rename_key(a_ , a_ , a_)
read_in_q_k_v(a_ , a_ , a_)
lowerCamelCase :List[str] = '''huggingface/label-files'''
lowerCamelCase :Any = '''imagenet-1k-id2label.json'''
lowerCamelCase :List[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''') , '''r'''))
    lowerCamelCase :Optional[Any] = {int(k): v for k, v in idalabel.items()}
lowerCamelCase :Optional[int] = idalabel
lowerCamelCase :Union[str, Any] = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowerCamelCase :Optional[Any] = ViTHybridModel(a_).eval()
else:
lowerCamelCase :Dict = ViTHybridForImageClassification(a_).eval()
model.load_state_dict(a_)
# create image processor
lowerCamelCase :Dict = create_transform(**resolve_data_config({} , model=a_))
lowerCamelCase :str = transform.transforms
lowerCamelCase :int = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
lowerCamelCase :Any = ViTHybridImageProcessor(
do_resize=a_ , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=a_ , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=a_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
lowerCamelCase :Dict = prepare_img()
lowerCamelCase :str = transform(a_).unsqueeze(0)
lowerCamelCase :str = processor(a_ , return_tensors='''pt''').pixel_values
# verify pixel values
assert torch.allclose(a_ , a_)
# verify logits
with torch.no_grad():
lowerCamelCase :Optional[int] = model(a_)
lowerCamelCase :Union[str, Any] = outputs.logits
print('''Predicted class:''' , logits.argmax(-1).item())
if base_model:
lowerCamelCase :Union[str, Any] = timm_model.forward_features(a_)
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(a_ , outputs.pooler_output , atol=1e-3)
else:
lowerCamelCase :List[str] = timm_model(a_)
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(a_ , outputs.logits , atol=1e-3)
print('''Looks ok!''')
if pytorch_dump_folder_path is not None:
Path(a_).mkdir(exist_ok=a_)
print(F"Saving model {vit_name} to {pytorch_dump_folder_path}")
model.save_pretrained(a_)
print(F"Saving processor to {pytorch_dump_folder_path}")
processor.save_pretrained(a_)
if push_to_hub:
print(F"Pushing model and processor to the hub {vit_name}")
model.push_to_hub(F"ybelkada/{vit_name}")
processor.push_to_hub(F"ybelkada/{vit_name}")
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
A__ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 49
| 0
|
from __future__ import annotations
class _lowerCAmelCase :
def __init__( self : Union[str, Any] , __snake_case : List[str]=None ):
lowerCamelCase :List[str] = data
lowerCamelCase :str = None
def __repr__( self : int ):
lowerCamelCase :Union[str, Any] = []
lowerCamelCase :Optional[Any] = self
while temp:
string_rep.append(F"{temp.data}" )
lowerCamelCase :str = temp.next
return "->".join(__snake_case )
def _lowerCamelCase ( a_ : list):
if not elements_list:
raise Exception('''The Elements List is empty''')
lowerCamelCase :Optional[int] = Node(elements_list[0])
for i in range(1 , len(a_)):
lowerCamelCase :List[str] = Node(elements_list[i])
lowerCamelCase :str = current.next
return head
def _lowerCamelCase ( a_ : Node):
    if head_node is not None and isinstance(head_node , Node):
print_reverse(head_node.next)
print(head_node.data)
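# For comparison, a hedged iterative sketch of the same reverse printing that uses
# an explicit stack instead of recursion (avoids hitting Python's recursion limit
# on long lists); it assumes a Node exposing `.data` and `.next`, as the __repr__
# above suggests.
def print_reverse_iterative(head_node):
    stack = []
    while head_node is not None:
        stack.append(head_node.data)
        head_node = head_node.next
    while stack:
        print(stack.pop())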
def _lowerCamelCase ( ):
from doctest import testmod
testmod()
lowerCamelCase :List[Any] = make_linked_list([14, 52, 14, 12, 43])
print('''Linked List:''')
print(a_)
print('''Elements in Reverse:''')
print_reverse(a_)
if __name__ == "__main__":
main()
| 709
|
def _lowerCamelCase ( a_ : int = 4_00_00_00):
lowerCamelCase :Dict = [0, 1]
lowerCamelCase :Optional[Any] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1])
if fib[i + 2] > n:
break
i += 1
lowerCamelCase :Dict = 0
    for j in range(len(fib) - 1):
if fib[j] % 2 == 0:
total += fib[j]
return total
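# Every third Fibonacci number is even, and the even terms obey the recurrence
# E(k) = 4 * E(k - 1) + E(k - 2) with E(1) = 2, E(2) = 8, so the sum can be
# accumulated without generating odd terms at all. A hedged alternative sketch
# (`even_fib_sum` is an illustrative name, not from the original):
def even_fib_sum(limit: int = 4_000_000) -> int:
    a, b = 2, 8  # the first two even Fibonacci numbers
    total = 0
    while a <= limit:
        total += a
        a, b = b, 4 * b + a
    return total

# even_fib_sum() == 4613732, the well-known Project Euler #2 answer.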
if __name__ == "__main__":
print(F'{solution() = }')
| 49
| 0
|
class _lowerCAmelCase :
def __init__( self : int , __snake_case : int , __snake_case : List[Any]=None , __snake_case : List[str]=None ):
lowerCamelCase :Union[str, Any] = data
lowerCamelCase :str = previous
lowerCamelCase :Optional[int] = next_node
def __str__( self : List[Any] ):
return F"{self.data}"
def snake_case ( self : List[str] ):
return self.data
def snake_case ( self : int ):
return self.next
def snake_case ( self : str ):
return self.previous
class _lowerCAmelCase :
def __init__( self : Union[str, Any] , __snake_case : Any ):
lowerCamelCase :str = head
def __iter__( self : Union[str, Any] ):
return self
def snake_case ( self : Any ):
if not self.current:
raise StopIteration
else:
lowerCamelCase :Dict = self.current.get_data()
lowerCamelCase :Union[str, Any] = self.current.get_next()
return value
class _lowerCAmelCase :
def __init__( self : str ):
lowerCamelCase :Optional[Any] = None # First node in list
lowerCamelCase :Union[str, Any] = None # Last node in list
def __str__( self : Dict ):
lowerCamelCase :Tuple = self.head
lowerCamelCase :Any = []
while current is not None:
nodes.append(current.get_data() )
lowerCamelCase :Optional[Any] = current.get_next()
return " ".join(str(__snake_case ) for node in nodes )
def __contains__( self : List[Any] , __snake_case : int ):
lowerCamelCase :Optional[int] = self.head
while current:
if current.get_data() == value:
return True
lowerCamelCase :Union[str, Any] = current.get_next()
return False
def __iter__( self : Any ):
return LinkedListIterator(self.head )
def snake_case ( self : Union[str, Any] ):
if self.head:
return self.head.get_data()
return None
def snake_case ( self : List[str] ):
if self.tail:
return self.tail.get_data()
return None
def snake_case ( self : Optional[Any] , __snake_case : Node ):
if self.head is None:
lowerCamelCase :Optional[int] = node
lowerCamelCase :Dict = node
else:
self.insert_before_node(self.head , __snake_case )
def snake_case ( self : List[str] , __snake_case : Node ):
if self.head is None:
self.set_head(__snake_case )
else:
self.insert_after_node(self.tail , __snake_case )
def snake_case ( self : int , __snake_case : int ):
lowerCamelCase :List[Any] = Node(__snake_case )
if self.head is None:
self.set_head(__snake_case )
else:
self.set_tail(__snake_case )
def snake_case ( self : Tuple , __snake_case : Node , __snake_case : Node ):
lowerCamelCase :Any = node
lowerCamelCase :Dict = node.previous
if node.get_previous() is None:
lowerCamelCase :Optional[Any] = node_to_insert
else:
lowerCamelCase :Optional[Any] = node_to_insert
lowerCamelCase :int = node_to_insert
def snake_case ( self : Optional[Any] , __snake_case : Node , __snake_case : Node ):
lowerCamelCase :Dict = node
lowerCamelCase :Optional[int] = node.next
if node.get_next() is None:
lowerCamelCase :Union[str, Any] = node_to_insert
else:
lowerCamelCase :Dict = node_to_insert
lowerCamelCase :List[str] = node_to_insert
def snake_case ( self : int , __snake_case : int , __snake_case : int ):
lowerCamelCase :Tuple = 1
lowerCamelCase :int = Node(__snake_case )
lowerCamelCase :List[Any] = self.head
while node:
if current_position == position:
self.insert_before_node(__snake_case , __snake_case )
return
current_position += 1
lowerCamelCase :str = node.next
self.insert_after_node(self.tail , __snake_case )
def snake_case ( self : Any , __snake_case : int ):
lowerCamelCase :Dict = self.head
while node:
if node.get_data() == item:
return node
lowerCamelCase :List[Any] = node.get_next()
raise Exception('''Node not found''' )
def snake_case ( self : Any , __snake_case : Tuple ):
if (node := self.get_node(__snake_case )) is not None:
if node == self.head:
lowerCamelCase :Optional[Any] = self.head.get_next()
if node == self.tail:
lowerCamelCase :Optional[Any] = self.tail.get_previous()
self.remove_node_pointers(__snake_case )
@staticmethod
def snake_case ( __snake_case : Node ):
if node.get_next():
lowerCamelCase :Any = node.previous
if node.get_previous():
lowerCamelCase :Any = node.next
lowerCamelCase :Any = None
lowerCamelCase :Tuple = None
def snake_case ( self : List[str] ):
return self.head is None
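# A self-contained sketch of the pointer surgery done by the insert-before helper
# above: rewire the previous/next links around the new node. `_N` and
# `insert_before` are illustrative names, not part of the original class.
class _N:
    def __init__(self, data):
        self.data, self.previous, self.next = data, None, None

def insert_before(node: _N, new: _N) -> None:
    new.next = node
    new.previous = node.previous
    if node.previous is not None:
        node.previous.next = new  # splice the new node after the old predecessor
    node.previous = new

_a, _b = _N(1), _N(3)
_a.next, _b.previous = _b, _a
insert_before(_b, _N(2))  # the list is now 1 <-> 2 <-> 3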
def _lowerCamelCase ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
            NllbMoeTop2Router,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 49
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"""facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'data2vec-text'
def __init__( self : str , __snake_case : Optional[Any]=30522 , __snake_case : str=768 , __snake_case : Tuple=12 , __snake_case : List[str]=12 , __snake_case : Optional[Any]=3072 , __snake_case : Any="gelu" , __snake_case : str=0.1 , __snake_case : List[str]=0.1 , __snake_case : Optional[int]=512 , __snake_case : Tuple=2 , __snake_case : int=0.0_2 , __snake_case : List[Any]=1e-1_2 , __snake_case : str=1 , __snake_case : Tuple=0 , __snake_case : int=2 , __snake_case : Tuple="absolute" , __snake_case : List[Any]=True , __snake_case : int=None , **__snake_case : Optional[int] , ):
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
lowerCamelCase :Tuple = vocab_size
lowerCamelCase :Dict = hidden_size
lowerCamelCase :List[Any] = num_hidden_layers
lowerCamelCase :List[str] = num_attention_heads
lowerCamelCase :Union[str, Any] = hidden_act
lowerCamelCase :Optional[Any] = intermediate_size
lowerCamelCase :str = hidden_dropout_prob
lowerCamelCase :Tuple = attention_probs_dropout_prob
lowerCamelCase :Any = max_position_embeddings
lowerCamelCase :int = type_vocab_size
lowerCamelCase :Any = initializer_range
lowerCamelCase :Optional[Any] = layer_norm_eps
lowerCamelCase :List[str] = position_embedding_type
lowerCamelCase :Tuple = use_cache
lowerCamelCase :List[Any] = classifier_dropout
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
@property
def snake_case ( self : Tuple ):
if self.task == "multiple-choice":
lowerCamelCase :Dict = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCamelCase :Union[str, Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 711
|
import numpy
class _lowerCAmelCase :
def __init__( self : Dict , __snake_case : numpy.ndarray , __snake_case : numpy.ndarray ):
lowerCamelCase :Dict = input_array
        # Random initial weights are assigned, where the first argument is the
        # number of nodes in the previous layer and the second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] gives the number of nodes in the input layer.
        # The first hidden layer consists of 4 nodes.
lowerCamelCase :Dict = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
lowerCamelCase :Dict = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
lowerCamelCase :Any = numpy.random.rand(3 , 1 )
# Real output values provided.
lowerCamelCase :Union[str, Any] = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
lowerCamelCase :List[str] = numpy.zeros(output_array.shape )
def snake_case ( self : Optional[int] ):
lowerCamelCase :Any = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
lowerCamelCase :Any = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
lowerCamelCase :Dict = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def snake_case ( self : Any ):
lowerCamelCase :Union[str, Any] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
lowerCamelCase :Dict = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
lowerCamelCase :int = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def snake_case ( self : Dict , __snake_case : numpy.ndarray , __snake_case : int , __snake_case : bool ):
for iteration in range(1 , iterations + 1 ):
lowerCamelCase :Union[str, Any] = self.feedforward()
self.back_propagation()
if give_loss:
lowerCamelCase :Tuple = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F"Iteration {iteration} Loss: {loss}" )
def snake_case ( self : Optional[int] , __snake_case : numpy.ndarray ):
lowerCamelCase :int = input_arr
lowerCamelCase :Union[str, Any] = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
lowerCamelCase :Optional[Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
lowerCamelCase :Optional[int] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
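# A minimal shape sanity check for the network above: with an (8, 3) input, the
# three weight matrices have shapes (3, 4), (4, 3) and (3, 1), so the forward pass
# contracts (8, 3) @ (3, 4) -> (8, 4) @ (4, 3) -> (8, 3) @ (3, 1) -> (8, 1).
# Variable names here are illustrative; the class attributes are obfuscated above.
_x = numpy.random.rand(8, 3)
_wa, _wb, _wc = numpy.random.rand(3, 4), numpy.random.rand(4, 3), numpy.random.rand(3, 1)
assert (_x @ _wa @ _wb @ _wc).shape == (8, 1)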
def _lowerCamelCase ( a_ : numpy.ndarray):
return 1 / (1 + numpy.exp(-value))
def _lowerCamelCase ( a_ : numpy.ndarray):
return (value) * (1 - (value))
def _lowerCamelCase ( ):
lowerCamelCase :Optional[Any] = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
lowerCamelCase :int = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa)
# Calling neural network class.
lowerCamelCase :List[Any] = TwoHiddenLayerNeuralNetwork(
input_array=a_ , output_array=a_)
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=a_ , iterations=10 , give_loss=a_)
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa))
if __name__ == "__main__":
example()
| 49
| 0
|
from __future__ import annotations
import math
def _lowerCamelCase ( a_ : int):
if num <= 0:
lowerCamelCase :Union[str, Any] = F"{num}: Invalid input, please enter a positive integer."
raise ValueError(a_)
lowerCamelCase :Any = [True] * (num + 1)
lowerCamelCase :Union[str, Any] = []
lowerCamelCase :List[Any] = 2
lowerCamelCase :Dict = int(math.sqrt(a_))
while start <= end:
# If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start to be False
            for i in range(start * start , num + 1 , start):
                if sieve[i] is True:
                    lowerCamelCase :Optional[int] = False
        start += 1
    for j in range(end + 1 , num + 1):
        if sieve[j] is True:
            prime.append(j)
return prime
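# For reference, a compact self-contained version of the same sieve (an
# illustrative reconstruction, not the original author's code); it runs in
# O(n log log n) time and O(n) memory:
def _sieve_sketch(num: int) -> list[int]:
    flags = [True] * (num + 1)
    primes = []
    for p in range(2, num + 1):
        if flags[p]:
            primes.append(p)
            for multiple in range(p * p, num + 1, p):
                flags[multiple] = False
    return primes

assert _sieve_sketch(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]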
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
| 712
|
def _lowerCamelCase ( a_ : str , a_ : str):
lowerCamelCase :List[str] = len(a_)
lowerCamelCase :List[str] = len(a_)
lowerCamelCase :int = [[False for _ in range(m + 1)] for _ in range(n + 1)]
lowerCamelCase :Optional[Any] = True
for i in range(a_):
for j in range(m + 1):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
lowerCamelCase :Any = True
if a[i].islower():
lowerCamelCase :List[str] = True
return dp[n][m]
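# The DP above answers whether string `b` can be formed from string `a` by
# uppercasing some of a's lowercase letters and deleting the rest. Since the dump
# collapses both parameters to `a_`, here is a self-contained reconstruction under
# the assumed signature matches(a, b) (names illustrative):
def _matches_sketch(a: str, b: str) -> bool:
    n, m = len(a), len(b)
    dp = [[False] * (m + 1) for _ in range(n + 1)]
    dp[0][0] = True  # the empty prefix of `a` produces the empty prefix of `b`
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True  # capitalize a[i] to match b[j]
                if a[i].islower():
                    dp[i + 1][j] = True  # delete the lowercase a[i]
    return dp[n][m]

assert _matches_sketch("daBcd", "ABC") is True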
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"""sayakpaul/vit-msn-base""": """https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json""",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'vit_msn'
def __init__( self : List[str] , __snake_case : Union[str, Any]=768 , __snake_case : Any=12 , __snake_case : List[str]=12 , __snake_case : Any=3072 , __snake_case : Any="gelu" , __snake_case : int=0.0 , __snake_case : str=0.0 , __snake_case : List[Any]=0.0_2 , __snake_case : int=1e-0_6 , __snake_case : Any=224 , __snake_case : Dict=16 , __snake_case : Dict=3 , __snake_case : Union[str, Any]=True , **__snake_case : Union[str, Any] , ):
super().__init__(**__snake_case )
lowerCamelCase :Optional[int] = hidden_size
lowerCamelCase :int = num_hidden_layers
lowerCamelCase :int = num_attention_heads
lowerCamelCase :List[Any] = intermediate_size
lowerCamelCase :Tuple = hidden_act
lowerCamelCase :Union[str, Any] = hidden_dropout_prob
lowerCamelCase :Any = attention_probs_dropout_prob
lowerCamelCase :List[str] = initializer_range
lowerCamelCase :Tuple = layer_norm_eps
lowerCamelCase :Tuple = image_size
lowerCamelCase :List[str] = patch_size
lowerCamelCase :Dict = num_channels
lowerCamelCase :Union[str, Any] = qkv_bias
| 713
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCAmelCase :
def __init__( self : Any , __snake_case : Optional[int] , __snake_case : int=13 , __snake_case : str=[30, 30] , __snake_case : Tuple=2 , __snake_case : Optional[Any]=3 , __snake_case : int=True , __snake_case : Tuple=True , __snake_case : List[Any]=32 , __snake_case : int=5 , __snake_case : Optional[Any]=4 , __snake_case : Union[str, Any]=37 , __snake_case : str="gelu" , __snake_case : Tuple=0.1 , __snake_case : List[Any]=0.1 , __snake_case : Union[str, Any]=10 , __snake_case : str=0.0_2 , __snake_case : Union[str, Any]=3 , __snake_case : Union[str, Any]=None , __snake_case : List[str]=8 , __snake_case : Any=10 , ):
lowerCamelCase :Optional[Any] = parent
lowerCamelCase :List[Any] = batch_size
lowerCamelCase :Any = image_size
lowerCamelCase :Union[str, Any] = patch_size
lowerCamelCase :Any = num_channels
lowerCamelCase :List[Any] = is_training
lowerCamelCase :Optional[Any] = use_labels
lowerCamelCase :Any = hidden_size
lowerCamelCase :List[Any] = num_hidden_layers
lowerCamelCase :List[str] = num_attention_heads
lowerCamelCase :Tuple = intermediate_size
lowerCamelCase :List[str] = hidden_act
lowerCamelCase :List[str] = hidden_dropout_prob
lowerCamelCase :Any = attention_probs_dropout_prob
lowerCamelCase :List[Any] = type_sequence_label_size
lowerCamelCase :Optional[int] = initializer_range
lowerCamelCase :List[Any] = num_labels
lowerCamelCase :Any = scope
lowerCamelCase :Union[str, Any] = n_targets
lowerCamelCase :Optional[Any] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
lowerCamelCase :Tuple = (image_size[1] // patch_size) * (image_size[0] // patch_size)
lowerCamelCase :str = num_patches + 1 + self.num_detection_tokens
def snake_case ( self : List[str] ):
lowerCamelCase :str = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowerCamelCase :List[str] = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowerCamelCase :Optional[int] = []
for i in range(self.batch_size ):
lowerCamelCase :List[str] = {}
lowerCamelCase :Tuple = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=__snake_case )
lowerCamelCase :List[str] = torch.rand(self.n_targets , 4 , device=__snake_case )
labels.append(__snake_case )
lowerCamelCase :str = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Union[str, Any] ):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__snake_case , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def snake_case ( self : Tuple , __snake_case : Tuple , __snake_case : Tuple , __snake_case : Any ):
lowerCamelCase :Optional[Any] = YolosModel(config=__snake_case )
model.to(__snake_case )
model.eval()
lowerCamelCase :Union[str, Any] = model(__snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def snake_case ( self : Dict , __snake_case : str , __snake_case : Optional[int] , __snake_case : Optional[Any] ):
lowerCamelCase :int = YolosForObjectDetection(__snake_case )
model.to(__snake_case )
model.eval()
lowerCamelCase :str = model(pixel_values=__snake_case )
lowerCamelCase :Any = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
lowerCamelCase :int = model(pixel_values=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def snake_case ( self : int ):
lowerCamelCase :List[Any] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase :str = config_and_inputs
lowerCamelCase :Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
_UpperCAmelCase = (
{'feature-extraction': YolosModel, 'object-detection': YolosForObjectDetection} if is_torch_available() else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def snake_case ( self : Any , __snake_case : List[Any] , __snake_case : Tuple , __snake_case : Dict=False ):
lowerCamelCase :Optional[int] = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
lowerCamelCase :Dict = []
for i in range(self.model_tester.batch_size ):
lowerCamelCase :Optional[Any] = {}
lowerCamelCase :List[Any] = torch.ones(
size=(self.model_tester.n_targets,) , device=__snake_case , dtype=torch.long )
lowerCamelCase :str = torch.ones(
self.model_tester.n_targets , 4 , device=__snake_case , dtype=torch.float )
labels.append(__snake_case )
lowerCamelCase :Union[str, Any] = labels
return inputs_dict
def snake_case ( self : Tuple ):
lowerCamelCase :Union[str, Any] = YolosModelTester(self )
lowerCamelCase :Dict = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case , hidden_size=37 )
def snake_case ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def snake_case ( self : Optional[Any] ):
# YOLOS does not use inputs_embeds
pass
def snake_case ( self : Tuple ):
lowerCamelCase , lowerCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :Optional[int] = model_class(__snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase :str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) )
def snake_case ( self : str ):
lowerCamelCase , lowerCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :str = model_class(__snake_case )
lowerCamelCase :Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase :Tuple = [*signature.parameters.keys()]
lowerCamelCase :Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __snake_case )
def snake_case ( self : int ):
lowerCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def snake_case ( self : str ):
lowerCamelCase , lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase :int = True
# in YOLOS, the seq_len is different
lowerCamelCase :str = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowerCamelCase :str = True
lowerCamelCase :Tuple = False
lowerCamelCase :Optional[int] = True
lowerCamelCase :int = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :str = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :str = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase :Optional[Any] = True
lowerCamelCase :str = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :Tuple = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Tuple = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowerCamelCase :Optional[int] = len(__snake_case )
# Check attention is always last and order is fine
lowerCamelCase :Union[str, Any] = True
lowerCamelCase :List[Any] = True
lowerCamelCase :Tuple = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :int = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Dict = 1
self.assertEqual(out_len + added_hidden_states , len(__snake_case ) )
lowerCamelCase :Dict = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def snake_case ( self : List[str] ):
def check_hidden_states_output(__snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Tuple ):
lowerCamelCase :Union[str, Any] = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :Any = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Optional[Any] = outputs.hidden_states
lowerCamelCase :Any = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__snake_case ) , __snake_case )
# YOLOS has a different seq_length
lowerCamelCase :List[str] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowerCamelCase , lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :Union[str, Any] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase :Any = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*__snake_case )
@slow
def snake_case ( self : Dict ):
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase :Tuple = YolosModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def _lowerCamelCase ( ):
lowerCamelCase :int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def snake_case ( self : Tuple ):
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def snake_case ( self : Dict ):
lowerCamelCase :Union[str, Any] = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(__snake_case )
lowerCamelCase :Optional[Any] = self.default_image_processor
lowerCamelCase :str = prepare_img()
lowerCamelCase :Dict = image_processor(images=__snake_case , return_tensors='''pt''' ).to(__snake_case )
# forward pass
with torch.no_grad():
lowerCamelCase :Optional[Any] = model(inputs.pixel_values )
# verify outputs
lowerCamelCase :int = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , __snake_case )
lowerCamelCase :Any = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=__snake_case , )
lowerCamelCase :Any = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __snake_case , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , __snake_case , atol=1e-4 ) )
# verify postprocessing
lowerCamelCase :List[str] = image_processor.post_process_object_detection(
__snake_case , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
lowerCamelCase :List[str] = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(__snake_case )
lowerCamelCase :str = [75, 75, 17, 63, 17]
lowerCamelCase :Tuple = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(__snake_case )
self.assertEqual(len(results['''scores'''] ) , 5 )
self.assertTrue(torch.allclose(results['''scores'''] , __snake_case , atol=1e-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist() , __snake_case )
self.assertTrue(torch.allclose(results['''boxes'''][0, :] , __snake_case ) )
| 49
| 0
|
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
def _lowerCAmelCase ( a_ : Union[str, Any] , a_ : Union[str, Any]):
lowerCamelCase :Dict = []
for i in range(encoder_config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"encoder.deit.blocks.{i}.norm1.weight", F"encoder.encoder.layer.{i}.layernorm_before.weight"))
rename_keys.append((F"encoder.deit.blocks.{i}.norm1.bias", F"encoder.encoder.layer.{i}.layernorm_before.bias"))
rename_keys.append(
(F"encoder.deit.blocks.{i}.attn.proj.weight", F"encoder.encoder.layer.{i}.attention.output.dense.weight"))
rename_keys.append(
(F"encoder.deit.blocks.{i}.attn.proj.bias", F"encoder.encoder.layer.{i}.attention.output.dense.bias"))
rename_keys.append(
(F"encoder.deit.blocks.{i}.norm2.weight", F"encoder.encoder.layer.{i}.layernorm_after.weight"))
rename_keys.append((F"encoder.deit.blocks.{i}.norm2.bias", F"encoder.encoder.layer.{i}.layernorm_after.bias"))
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc1.weight", F"encoder.encoder.layer.{i}.intermediate.dense.weight"))
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc1.bias", F"encoder.encoder.layer.{i}.intermediate.dense.bias"))
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc2.weight", F"encoder.encoder.layer.{i}.output.dense.weight"))
rename_keys.append((F"encoder.deit.blocks.{i}.mlp.fc2.bias", F"encoder.encoder.layer.{i}.output.dense.bias"))
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
])
return rename_keys
def _lowerCAmelCase ( a_ : Dict , a_ : Tuple):
for i in range(encoder_config.num_hidden_layers):
# queries, keys and values (only weights, no biases)
lowerCamelCase :int = state_dict.pop(F"encoder.deit.blocks.{i}.attn.qkv.weight")
lowerCamelCase :Optional[Any] = in_proj_weight[
: encoder_config.hidden_size, :
]
lowerCamelCase :Optional[Any] = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
lowerCamelCase :Any = in_proj_weight[
-encoder_config.hidden_size :, :
]
def _lowerCAmelCase ( a_ : Union[str, Any] , a_ : Dict , a_ : Optional[int]):
lowerCamelCase :Optional[int] = dct.pop(a_)
lowerCamelCase :List[Any] = val
def _lowerCAmelCase ( a_ : Any):
if "handwritten" in checkpoint_url:
lowerCamelCase :str = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
lowerCamelCase :List[str] = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
lowerCamelCase :Tuple = Image.open(requests.get(a_ , stream=a_).raw).convert('''RGB''')
return im
@torch.no_grad()
def _lowerCAmelCase ( a_ : List[str] , a_ : str):
lowerCamelCase :Union[str, Any] = ViTConfig(image_size=3_84 , qkv_bias=a_)
lowerCamelCase :Union[str, Any] = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
lowerCamelCase :str = 7_68
elif "large" in checkpoint_url:
# use ViT-large encoder
lowerCamelCase :List[Any] = 10_24
lowerCamelCase :Optional[Any] = 40_96
lowerCamelCase :Optional[Any] = 24
lowerCamelCase :int = 16
lowerCamelCase :Any = 10_24
else:
raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''')
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
lowerCamelCase :Optional[Any] = False
lowerCamelCase :Tuple = '''relu'''
lowerCamelCase :Optional[int] = 10_24
lowerCamelCase :Dict = True
lowerCamelCase :Optional[int] = False
lowerCamelCase :List[Any] = False
# load HuggingFace model
lowerCamelCase :Any = ViTModel(a_ , add_pooling_layer=a_)
lowerCamelCase :Optional[Any] = TrOCRForCausalLM(a_)
lowerCamelCase :Union[str, Any] = VisionEncoderDecoderModel(encoder=a_ , decoder=a_)
model.eval()
# load state_dict of original model, rename some keys
lowerCamelCase :Union[str, Any] = torch.hub.load_state_dict_from_url(a_ , map_location='''cpu''' , check_hash=a_)['''model''']
lowerCamelCase :Any = create_rename_keys(a_ , a_)
for src, dest in rename_keys:
rename_key(a_ , a_ , a_)
read_in_q_k_v(a_ , a_)
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
lowerCamelCase :int = state_dict.pop(a_)
if key.startswith('''decoder''') and "output_projection" not in key:
lowerCamelCase :Optional[int] = val
else:
lowerCamelCase :Any = val
# load state dict
model.load_state_dict(a_)
# Check outputs on an image
lowerCamelCase :int = ViTImageProcessor(size=encoder_config.image_size)
lowerCamelCase :Any = RobertaTokenizer.from_pretrained('''roberta-large''')
lowerCamelCase :List[Any] = TrOCRProcessor(a_ , a_)
lowerCamelCase :int = processor(images=prepare_img(a_) , return_tensors='''pt''').pixel_values
# verify logits
lowerCamelCase :List[Any] = torch.tensor([[model.config.decoder.decoder_start_token_id]])
lowerCamelCase :str = model(pixel_values=a_ , decoder_input_ids=a_)
lowerCamelCase :Optional[Any] = outputs.logits
lowerCamelCase :Optional[Any] = torch.Size([1, 1, 5_02_65])
if "trocr-base-handwritten" in checkpoint_url:
lowerCamelCase :Dict = torch.tensor(
[-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311])
elif "trocr-large-handwritten" in checkpoint_url:
lowerCamelCase :Optional[int] = torch.tensor(
[-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170])
elif "trocr-base-printed" in checkpoint_url:
lowerCamelCase :str = torch.tensor(
[-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210])
elif "trocr-large-printed" in checkpoint_url:
lowerCamelCase :Tuple = torch.tensor(
[-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, -0.8_106, -1.7_535])
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , a_ , atol=1e-3), "First elements of logits not as expected"
Path(a_).mkdir(exist_ok=a_)
print(F"Saving model to {pytorch_dump_folder_path}")
model.save_pretrained(a_)
print(F"Saving processor to {pytorch_dump_folder_path}")
processor.save_pretrained(a_)
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
A__ = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 714
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Tuple ):
lowerCamelCase :List[Any] = inspect.getfile(accelerate.test_utils )
lowerCamelCase :Dict = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
lowerCamelCase :Any = test_metrics
@require_cpu
def snake_case ( self : Dict ):
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def snake_case ( self : int ):
debug_launcher(self.test_metrics.main )
@require_single_gpu
def snake_case ( self : Any ):
self.test_metrics.main()
@require_multi_gpu
def snake_case ( self : Optional[int] ):
print(F"Found {torch.cuda.device_count()} devices." )
lowerCamelCase :Optional[int] = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__snake_case , env=os.environ.copy() )
| 49
| 0
|
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def _lowerCamelCase ( a_ : ndarray):
return np.dot(a_ , a_)
class _lowerCAmelCase :
def __init__( self : Any , *,
__snake_case : float = np.inf , __snake_case : str = "linear" , __snake_case : float = 0.0 , ):
lowerCamelCase :Optional[int] = regularization
lowerCamelCase :Optional[Any] = gamma
if kernel == "linear":
lowerCamelCase :Tuple = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('''rbf kernel requires gamma''' )
if not isinstance(self.gamma , (float, int) ):
raise ValueError('''gamma must be float or int''' )
if not self.gamma > 0:
raise ValueError('''gamma must be > 0''' )
lowerCamelCase :Tuple = self.__rbf
# in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
lowerCamelCase :str = F"Unknown kernel: {kernel}"
raise ValueError(__snake_case )
def snake_case ( self : Union[str, Any] , __snake_case : ndarray , __snake_case : ndarray ):
return np.dot(__snake_case , __snake_case )
def snake_case ( self : List[Any] , __snake_case : ndarray , __snake_case : ndarray ):
return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )
def snake_case ( self : List[str] , __snake_case : list[ndarray] , __snake_case : ndarray ):
lowerCamelCase :str = observations
lowerCamelCase :Optional[Any] = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
(lowerCamelCase ) :str = np.shape(__snake_case )
def to_minimize(__snake_case : ndarray ) -> float:
lowerCamelCase :Dict = 0
(lowerCamelCase ) :Dict = np.shape(__snake_case )
for i in range(__snake_case ):
for j in range(__snake_case ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j] )
)
return 1 / 2 * s - sum(__snake_case )
lowerCamelCase :Tuple = LinearConstraint(__snake_case , 0 , 0 )
lowerCamelCase :Dict = Bounds(0 , self.regularization )
lowerCamelCase :Union[str, Any] = minimize(
__snake_case , np.ones(__snake_case ) , bounds=__snake_case , constraints=[ly_contraint] ).x
lowerCamelCase :List[str] = l_star
# calculating mean offset of separation plane to points
lowerCamelCase :Union[str, Any] = 0
for i in range(__snake_case ):
for j in range(__snake_case ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j] )
lowerCamelCase :Any = s / n
def snake_case ( self : str , __snake_case : ndarray ):
lowerCamelCase :List[Any] = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , __snake_case )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
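# A hedged usage sketch for the support vector classifier above. The dump renames
# the class to `_lowerCAmelCase` and every method to `snake_case`, so the names
# SVC, fit and predict below are reconstructions of the assumed original API and
# are not guaranteed to run against this file as printed:
# xs = [np.array([1.0, 1.0]), np.array([2.0, 2.0]),
#       np.array([-1.0, -1.0]), np.array([-2.0, -2.0])]
# ys = np.array([1, 1, -1, -1])
# svm = SVC(regularization=10.0, kernel="linear")
# svm.fit(observations=xs, classes=ys)
# svm.predict(np.array([3.0, 3.0]))  # expected: 1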
if __name__ == "__main__":
import doctest
doctest.testmod()
| 715
|
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = ''
_UpperCAmelCase = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
_UpperCAmelCase = None # compression type in fsspec. ex: "gzip"
_UpperCAmelCase = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self : str , __snake_case : str = "" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , **__snake_case : Dict ):
super().__init__(self , **__snake_case )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
lowerCamelCase :Optional[Any] = fsspec.open(
__snake_case , mode='''rb''' , protocol=__snake_case , compression=self.compression , client_kwargs={
'''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459
'''trust_env''': True, # Enable reading proxy env variables.
**(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
lowerCamelCase :List[str] = os.path.basename(self.file.path.split('''::''' )[0] )
lowerCamelCase :Dict = (
self.compressed_name[: self.compressed_name.rindex('''.''' )]
if '''.''' in self.compressed_name
else self.compressed_name
)
lowerCamelCase :List[str] = None
@classmethod
def snake_case ( cls : Any , __snake_case : Any ):
# compressed file paths are always relative to the archive root
return super()._strip_protocol(__snake_case ).lstrip('''/''' )
def snake_case ( self : Any ):
if self.dir_cache is None:
lowerCamelCase :Optional[Any] = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
lowerCamelCase :Optional[Any] = {f['''name''']: f}
def snake_case ( self : Union[str, Any] , __snake_case : str ):
return self.file.open().read()
def snake_case ( self : Optional[int] , __snake_case : str , __snake_case : str = "rb" , __snake_case : int=None , __snake_case : Optional[int]=True , __snake_case : str=None , **__snake_case : str , ):
lowerCamelCase :List[str] = self._strip_protocol(__snake_case )
if mode != "rb":
raise ValueError(F"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'" )
return self.file.open()
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'bz2'
_UpperCAmelCase = 'bz2'
_UpperCAmelCase = '.bz2'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'gzip'
_UpperCAmelCase = 'gzip'
_UpperCAmelCase = '.gz'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'lz4'
_UpperCAmelCase = 'lz4'
_UpperCAmelCase = '.lz4'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'xz'
_UpperCAmelCase = 'xz'
_UpperCAmelCase = '.xz'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'zstd'
_UpperCAmelCase = 'zstd'
_UpperCAmelCase = '.zst'
def __init__( self : str , __snake_case : str , __snake_case : str = "rb" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , __snake_case : int = DEFAULT_BLOCK_SIZE , **__snake_case : int , ):
super().__init__(
fo=__snake_case , mode=__snake_case , target_protocol=__snake_case , target_options=__snake_case , block_size=__snake_case , **__snake_case , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
lowerCamelCase :Tuple = self.file.__enter__
class _lowerCAmelCase :
def __init__( self : Dict , __snake_case : Tuple ):
lowerCamelCase :Optional[int] = file_
def __enter__( self : Optional[int] ):
self._file.__enter__()
return self
def __exit__( self : str , *__snake_case : Optional[Any] , **__snake_case : List[Any] ):
self._file.__exit__(*__snake_case , **__snake_case )
def __iter__( self : Optional[Any] ):
return iter(self._file )
def snake_case ( self : List[Any] ):
return next(self._file )
def __getattr__( self : Any , __snake_case : str ):
return getattr(self._file , __snake_case )
def fixed_enter(*__snake_case : Optional[int] , **__snake_case : str ):
return WrappedFile(_enter(*__snake_case , **__snake_case ) )
lowerCamelCase :Dict = fixed_enter
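# A hedged usage sketch, following the protocol example in the class comment above
# (the URL is illustrative): once these filesystems are registered with fsspec, a
# compressed file can be read transparently through URL chaining, e.g.
# import fsspec
# with fsspec.open("gzip://file.txt::http://foo.bar/file.txt.gz", "rb") as f:
#     data = f.read()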
| 49
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A__ = {"""configuration_opt""": ["""OPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """OPTConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""OPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OPTForCausalLM""",
"""OPTModel""",
"""OPTPreTrainedModel""",
"""OPTForSequenceClassification""",
"""OPTForQuestionAnswering""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""TFOPTForCausalLM""", """TFOPTModel""", """TFOPTPreTrainedModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""FlaxOPTForCausalLM""",
"""FlaxOPTModel""",
"""FlaxOPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 716
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = LEDTokenizer
_UpperCAmelCase = LEDTokenizerFast
_UpperCAmelCase = True
def snake_case ( self : Any ):
super().setUp()
lowerCamelCase :Optional[int] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowerCamelCase :Any = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
lowerCamelCase :List[str] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCamelCase :int = {'''unk_token''': '''<unk>'''}
lowerCamelCase :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__snake_case ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__snake_case ) )
def snake_case ( self : int , **__snake_case : int ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Dict , **__snake_case : Any ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Optional[Any] , __snake_case : Union[str, Any] ):
return "lower newer", "lower newer"
@cached_property
def snake_case ( self : Any ):
return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )
@cached_property
def snake_case ( self : int ):
return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )
@require_torch
def snake_case ( self : str ):
lowerCamelCase :Any = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
lowerCamelCase :List[Any] = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[Any] = tokenizer(__snake_case , max_length=len(__snake_case ) , padding=__snake_case , return_tensors='''pt''' )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
lowerCamelCase :List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(__snake_case , __snake_case )
@require_torch
def snake_case ( self : Tuple ):
lowerCamelCase :Dict = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Union[str, Any] = tokenizer(__snake_case , padding=__snake_case , return_tensors='''pt''' )
self.assertIn('''input_ids''' , __snake_case )
self.assertIn('''attention_mask''' , __snake_case )
self.assertNotIn('''labels''' , __snake_case )
self.assertNotIn('''decoder_attention_mask''' , __snake_case )
@require_torch
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Union[str, Any] = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :List[Any] = tokenizer(text_target=__snake_case , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def snake_case ( self : List[Any] ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[Any] = tokenizer(
['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=__snake_case , truncation=__snake_case , return_tensors='''pt''' )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
def snake_case ( self : Optional[int] ):
lowerCamelCase :Union[str, Any] = ['''A long paragraph for summarization.''']
lowerCamelCase :Any = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Union[str, Any] = tokenizer(__snake_case , return_tensors='''pt''' )
lowerCamelCase :Any = tokenizer(text_target=__snake_case , return_tensors='''pt''' )
lowerCamelCase :Optional[int] = inputs['''input_ids''']
lowerCamelCase :Any = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def snake_case ( self : Dict ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[int] = ['''Summary of the text.''', '''Another summary.''']
lowerCamelCase :List[Any] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
lowerCamelCase :Optional[int] = tokenizer(__snake_case , padding=__snake_case )
lowerCamelCase :Union[str, Any] = [[0] * len(x ) for x in encoded_output['''input_ids''']]
lowerCamelCase :str = tokenizer.pad(__snake_case )
self.assertSequenceEqual(outputs['''global_attention_mask'''] , __snake_case )
def snake_case ( self : Tuple ):
pass
def snake_case ( self : Optional[int] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase :Optional[Any] = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :Tuple = self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :int = '''A, <mask> AllenNLP sentence.'''
lowerCamelCase :str = tokenizer_r.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case )
lowerCamelCase :str = tokenizer_p.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case )
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
lowerCamelCase :Optional[int] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
lowerCamelCase :Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
__snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
| 49
| 0
|
import os
from typing import Dict, List, Tuple, TypeVar, Union
A__ = TypeVar("""T""")
A__ = Union[List[T], Tuple[T, ...]]
A__ = Union[T, List[T], Dict[str, T]]
A__ = Union[str, bytes, os.PathLike]
| 717
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A__ = {
"""configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
"""processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
"""tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""LayoutLMv2TokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""LayoutLMv2FeatureExtractor"""]
A__ = ["""LayoutLMv2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv2ForQuestionAnswering""",
"""LayoutLMv2ForSequenceClassification""",
"""LayoutLMv2ForTokenClassification""",
"""LayoutLMv2Layer""",
"""LayoutLMv2Model""",
"""LayoutLMv2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 49
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'roberta-prelayernorm'
def __init__( self : str , __snake_case : List[str]=50265 , __snake_case : Union[str, Any]=768 , __snake_case : Tuple=12 , __snake_case : int=12 , __snake_case : Any=3072 , __snake_case : Optional[int]="gelu" , __snake_case : List[Any]=0.1 , __snake_case : int=0.1 , __snake_case : Union[str, Any]=512 , __snake_case : Dict=2 , __snake_case : int=0.0_2 , __snake_case : Any=1e-1_2 , __snake_case : Optional[int]=1 , __snake_case : Dict=0 , __snake_case : Optional[int]=2 , __snake_case : Any="absolute" , __snake_case : Union[str, Any]=True , __snake_case : List[str]=None , **__snake_case : Optional[int] , ):
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
lowerCamelCase :Optional[int] = vocab_size
lowerCamelCase :Dict = hidden_size
lowerCamelCase :Tuple = num_hidden_layers
lowerCamelCase :Optional[int] = num_attention_heads
lowerCamelCase :Any = hidden_act
lowerCamelCase :List[Any] = intermediate_size
lowerCamelCase :Union[str, Any] = hidden_dropout_prob
lowerCamelCase :str = attention_probs_dropout_prob
lowerCamelCase :Tuple = max_position_embeddings
lowerCamelCase :int = type_vocab_size
lowerCamelCase :Optional[Any] = initializer_range
lowerCamelCase :Union[str, Any] = layer_norm_eps
lowerCamelCase :Dict = position_embedding_type
lowerCamelCase :List[Any] = use_cache
lowerCamelCase :Optional[int] = classifier_dropout
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
@property
def snake_case ( self : Any ):
if self.task == "multiple-choice":
lowerCamelCase :Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCamelCase :List[str] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 718
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _lowerCAmelCase :
@staticmethod
def snake_case ( *__snake_case : str , **__snake_case : str ):
pass
@is_pipeline_test
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@require_torch
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Optional[int] = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] )
# The floating-point scores are so close that we run into floating-point error, so the order is not
# guaranteed across Python and torch versions.
self.assertIn(
nested_simplify(__snake_case ) , [
[{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}],
[{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}],
] , )
lowerCamelCase :Tuple = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
] , )
@require_tf
def snake_case ( self : Tuple ):
lowerCamelCase :Tuple = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}] , )
lowerCamelCase :int = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
] , )
@slow
@require_torch
def snake_case ( self : Any ):
lowerCamelCase :str = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
] , )
lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Union[str, Any] = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
lowerCamelCase :Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
] , )
lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
],
]
* 5 , )
| 49
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = KandinskyInpaintPipeline
_UpperCAmelCase = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
_UpperCAmelCase = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
_UpperCAmelCase = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
_UpperCAmelCase = False
@property
def snake_case ( self : Tuple ):
return 32
@property
def snake_case ( self : str ):
return 32
@property
def snake_case ( self : List[Any] ):
return self.time_input_dim
@property
def snake_case ( self : str ):
return self.time_input_dim * 4
@property
def snake_case ( self : Dict ):
return 100
@property
def snake_case ( self : List[Any] ):
lowerCamelCase :str = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def snake_case ( self : Union[str, Any] ):
torch.manual_seed(0 )
lowerCamelCase :Any = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
lowerCamelCase :List[str] = MultilingualCLIP(__snake_case )
lowerCamelCase :List[Any] = text_encoder.eval()
return text_encoder
@property
def snake_case ( self : str ):
torch.manual_seed(0 )
lowerCamelCase :List[Any] = {
'''in_channels''': 9,
# Out channels is double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
lowerCamelCase :Dict = UNetaDConditionModel(**__snake_case )
return model
@property
def snake_case ( self : List[Any] ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def snake_case ( self : Union[str, Any] ):
torch.manual_seed(0 )
lowerCamelCase :str = VQModel(**self.dummy_movq_kwargs )
return model
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Union[str, Any] = self.dummy_text_encoder
lowerCamelCase :int = self.dummy_tokenizer
lowerCamelCase :Optional[Any] = self.dummy_unet
lowerCamelCase :str = self.dummy_movq
lowerCamelCase :Optional[Any] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='''linear''' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=__snake_case , set_alpha_to_one=__snake_case , steps_offset=1 , prediction_type='''epsilon''' , thresholding=__snake_case , )
lowerCamelCase :Optional[int] = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def snake_case ( self : List[Any] , __snake_case : List[Any] , __snake_case : str=0 ):
lowerCamelCase :Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__snake_case ) ).to(__snake_case )
lowerCamelCase :Tuple = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__snake_case )
# create init_image
lowerCamelCase :Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case )
lowerCamelCase :str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase :Tuple = Image.fromarray(np.uinta(__snake_case ) ).convert('''RGB''' ).resize((256, 256) )
# create mask
lowerCamelCase :Tuple = np.ones((64, 64) , dtype=np.floataa )
lowerCamelCase :Tuple = 0
if str(__snake_case ).startswith('''mps''' ):
lowerCamelCase :Tuple = torch.manual_seed(__snake_case )
else:
lowerCamelCase :Dict = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
lowerCamelCase :Optional[int] = {
'''prompt''': '''horse''',
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def snake_case ( self : str ):
lowerCamelCase :Dict = '''cpu'''
lowerCamelCase :List[str] = self.get_dummy_components()
lowerCamelCase :Tuple = self.pipeline_class(**__snake_case )
lowerCamelCase :str = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
lowerCamelCase :Optional[int] = pipe(**self.get_dummy_inputs(__snake_case ) )
lowerCamelCase :Optional[Any] = output.images
lowerCamelCase :Tuple = pipe(
**self.get_dummy_inputs(__snake_case ) , return_dict=__snake_case , )[0]
lowerCamelCase :Optional[Any] = image[0, -3:, -3:, -1]
lowerCamelCase :List[Any] = image_from_tuple[0, -3:, -3:, -1]
print(F"image.shape {image.shape}" )
assert image.shape == (1, 64, 64, 3)
lowerCamelCase :Optional[Any] = np.array(
[0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def snake_case ( self : Dict ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Dict ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Optional[int] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''' )
lowerCamelCase :List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
lowerCamelCase :str = np.ones((768, 768) , dtype=np.floataa )
lowerCamelCase :Optional[int] = 0
lowerCamelCase :Dict = '''a hat'''
lowerCamelCase :str = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(__snake_case )
lowerCamelCase :Optional[int] = KandinskyInpaintPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-inpaint''' , torch_dtype=torch.floataa )
lowerCamelCase :List[str] = pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
lowerCamelCase :Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowerCamelCase :Dict = pipe_prior(
__snake_case , generator=__snake_case , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
lowerCamelCase :Optional[int] = pipeline(
__snake_case , image=__snake_case , mask_image=__snake_case , image_embeds=__snake_case , negative_image_embeds=__snake_case , generator=__snake_case , num_inference_steps=100 , height=768 , width=768 , output_type='''np''' , )
lowerCamelCase :List[Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
| 719
|
import operator as op
def _lowerCamelCase ( a_ : Tuple):
lowerCamelCase :int = []
lowerCamelCase :List[str] = lambda x , y: int(x / y) # noqa: E731 integer division operation
lowerCamelCase :Optional[int] = {
'''^''': op.pow,
'''*''': op.mul,
'''/''': div,
'''+''': op.add,
'''-''': op.sub,
} # operators & their respective operation
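# e.g. for "5 3 -": b = 3 is popped first, then a = 5, and 5 - 3 = 2 is pushed back onto the stack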
# print table header
print('''Symbol'''.center(8) , '''Action'''.center(12) , '''Stack''' , sep=''' | ''')
print('''-''' * (30 + len(a_)))
for x in post_fix:
if x.isdigit(): # if x is a digit
stack.append(a_) # append x to stack
# output in tabular format
print(x.rjust(8) , ('''push(''' + x + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
else:
lowerCamelCase :Optional[Any] = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8) , ('''pop(''' + b + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
lowerCamelCase :str = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8) , ('''pop(''' + a + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
stack.append(
str(opr[x](int(a_) , int(a_)))) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8) , ('''push(''' + a + x + b + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''' , )
return int(stack[0])
if __name__ == "__main__":
A__ = input("""\n\nEnter a Postfix Equation (space separated) = """).split(""" """)
print("""\n\tResult = """, solve(Postfix))
| 49
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
A__ = None
A__ = logging.get_logger(__name__)
A__ = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
A__ = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""",
},
}
A__ = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
A__ = """▁"""
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = AlbertTokenizer
def __init__( self : int , __snake_case : Optional[Any]=None , __snake_case : Dict=None , __snake_case : Optional[int]=True , __snake_case : str=True , __snake_case : Any=False , __snake_case : Union[str, Any]="[CLS]" , __snake_case : str="[SEP]" , __snake_case : Union[str, Any]="<unk>" , __snake_case : Union[str, Any]="[SEP]" , __snake_case : List[Any]="<pad>" , __snake_case : Optional[Any]="[CLS]" , __snake_case : Optional[Any]="[MASK]" , **__snake_case : List[Any] , ):
# The mask token behaves like a normal word, i.e. it includes the space before it and
# is included in the raw text, so there should be a match in a non-normalized sentence.
lowerCamelCase :Optional[Any] = (
AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case , normalized=__snake_case )
if isinstance(__snake_case , __snake_case )
else mask_token
)
super().__init__(
__snake_case , tokenizer_file=__snake_case , do_lower_case=__snake_case , remove_space=__snake_case , keep_accents=__snake_case , bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , pad_token=__snake_case , cls_token=__snake_case , mask_token=__snake_case , **__snake_case , )
lowerCamelCase :List[Any] = do_lower_case
lowerCamelCase :str = remove_space
lowerCamelCase :Dict = keep_accents
lowerCamelCase :Union[str, Any] = vocab_file
lowerCamelCase :str = False if not self.vocab_file else True
def snake_case ( self : List[Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
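# Build model inputs by adding special tokens: [CLS] A [SEP] for a single sequence, [CLS] A [SEP] B [SEP] for a pair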
lowerCamelCase :List[str] = [self.sep_token_id]
lowerCamelCase :int = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def snake_case ( self : Tuple , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
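# Token type ids: 0 for the first sequence and its special tokens, 1 for the second sequence and its trailing [SEP]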
lowerCamelCase :List[Any] = [self.sep_token_id]
lowerCamelCase :Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case ( self : Optional[int] , __snake_case : str , __snake_case : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__snake_case ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
lowerCamelCase :Optional[Any] = os.path.join(
__snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ):
copyfile(self.vocab_file , __snake_case )
return (out_vocab_file,)
| 720
|
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
A__ = """Hello, World!"""
A__ = """en_XX"""
def _lowerCamelCase ( a_ : str , a_ : str , a_ : bool):
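# Load the fairseq X-MOD checkpoint, mirror its config, and copy its weights into a Hugging Face Xmod model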
lowerCamelCase :int = Path('''data_bin''')
lowerCamelCase :Union[str, Any] = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(a_).parent) , checkpoint_file=Path(a_).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(a_) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(a_).parent / '''sentencepiece.bpe.model''') , src_dict=str(data_dir / '''dict.txt''') , )
xmod.eval() # disable dropout
print(a_)
lowerCamelCase :Any = xmod.model.encoder.sentence_encoder
lowerCamelCase :List[str] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
lowerCamelCase :Dict = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our X-MOD config:''' , a_)
lowerCamelCase :List[Any] = XmodForSequenceClassification(a_) if classification_head else XmodForMaskedLM(a_)
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCamelCase :Union[str, Any] = xmod_sent_encoder.embed_tokens.weight
lowerCamelCase :Tuple = xmod_sent_encoder.embed_positions.weight
lowerCamelCase :List[str] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight) # just zero them out b/c xmod doesn't use them.
lowerCamelCase :List[Any] = xmod_sent_encoder.layernorm_embedding.weight
lowerCamelCase :Optional[int] = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers):
# Encoder: start of layer
lowerCamelCase :Union[str, Any] = model.roberta.encoder.layer[i]
lowerCamelCase :List[str] = xmod_sent_encoder.layers[i]
# self attention
lowerCamelCase :Optional[int] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size))
):
raise AssertionError('''Dimensions of self-attention weights do not match.''')
lowerCamelCase :Optional[int] = xmod_layer.self_attn.q_proj.weight
lowerCamelCase :List[str] = xmod_layer.self_attn.q_proj.bias
lowerCamelCase :str = xmod_layer.self_attn.k_proj.weight
lowerCamelCase :Optional[Any] = xmod_layer.self_attn.k_proj.bias
lowerCamelCase :Dict = xmod_layer.self_attn.v_proj.weight
lowerCamelCase :Optional[int] = xmod_layer.self_attn.v_proj.bias
# self-attention output
lowerCamelCase :Optional[int] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('''Dimensions of self-attention output weights do not match.''')
lowerCamelCase :List[Any] = xmod_layer.self_attn.out_proj.weight
lowerCamelCase :Union[str, Any] = xmod_layer.self_attn.out_proj.bias
lowerCamelCase :str = xmod_layer.self_attn_layer_norm.weight
lowerCamelCase :List[Any] = xmod_layer.self_attn_layer_norm.bias
# intermediate
lowerCamelCase :Optional[int] = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of intermediate weights do not match.''')
lowerCamelCase :int = xmod_layer.fca.weight
lowerCamelCase :Union[str, Any] = xmod_layer.fca.bias
# output
lowerCamelCase :List[str] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of feed-forward weights do not match.''')
lowerCamelCase :str = xmod_layer.fca.weight
lowerCamelCase :int = xmod_layer.fca.bias
lowerCamelCase :List[Any] = xmod_layer.final_layer_norm.weight
lowerCamelCase :List[str] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowerCamelCase :List[str] = xmod_layer.adapter_layer_norm.weight
lowerCamelCase :int = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
raise AssertionError('''Lists of language adapters do not match.''')
for lang_code, adapter in xmod_layer.adapter_modules.items():
lowerCamelCase :Optional[int] = bert_output.adapter_modules[lang_code]
lowerCamelCase :Dict = xmod_layer.adapter_modules[lang_code]
lowerCamelCase :List[Any] = from_adapter.fca.weight
lowerCamelCase :List[Any] = from_adapter.fca.bias
lowerCamelCase :Dict = from_adapter.fca.weight
lowerCamelCase :Optional[Any] = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowerCamelCase :Dict = xmod_sent_encoder.layer_norm.weight
lowerCamelCase :List[Any] = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowerCamelCase :Union[str, Any] = xmod.model.classification_heads['''mnli'''].dense.weight
lowerCamelCase :Tuple = xmod.model.classification_heads['''mnli'''].dense.bias
lowerCamelCase :Optional[Any] = xmod.model.classification_heads['''mnli'''].out_proj.weight
lowerCamelCase :List[Any] = xmod.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
lowerCamelCase :int = xmod.model.encoder.lm_head.dense.weight
lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.dense.bias
lowerCamelCase :Optional[int] = xmod.model.encoder.lm_head.layer_norm.weight
lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.layer_norm.bias
lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.weight
lowerCamelCase :Any = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCamelCase :str = xmod.encode(a_).unsqueeze(0) # batch of size 1
model.roberta.set_default_language(a_)
lowerCamelCase :Any = model(a_)[0]
if classification_head:
lowerCamelCase :Dict = xmod.model.classification_heads['''mnli'''](xmod.extract_features(a_))
else:
lowerCamelCase :int = xmod.model(a_ , lang_id=[SAMPLE_LANGUAGE])[0]
print(our_output.shape , their_output.shape)
lowerCamelCase :List[str] = torch.max(torch.abs(our_output - their_output)).item()
print(F"max_absolute_diff = {max_absolute_diff}") # ~ 1e-7
lowerCamelCase :str = torch.allclose(a_ , a_ , atol=1e-3)
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''')
if not success:
raise Exception('''Something went wRoNg''')
Path(a_).mkdir(parents=a_ , exist_ok=a_)
print(F"Saving model to {pytorch_dump_folder_path}")
model.save_pretrained(a_)
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
A__ = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 49
| 0
|
def _lowerCamelCase ( a_ : list):
lowerCamelCase :Union[str, Any] = 0
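# e.g. files of sizes [2, 3, 4]: merge 2+3 (cost 5), then 5+4 (cost 9), total cost 14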
while len(a_) > 1:
lowerCamelCase :List[Any] = 0
# Pick the two smallest files to merge next
for _ in range(2):
lowerCamelCase :int = files.index(min(a_))
temp += files[min_index]
files.pop(a_)
files.append(a_)
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
| 721
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'roberta-prelayernorm'
def __init__( self : str , __snake_case : List[str]=50265 , __snake_case : Union[str, Any]=768 , __snake_case : Tuple=12 , __snake_case : int=12 , __snake_case : Any=3072 , __snake_case : Optional[int]="gelu" , __snake_case : List[Any]=0.1 , __snake_case : int=0.1 , __snake_case : Union[str, Any]=512 , __snake_case : Dict=2 , __snake_case : int=0.0_2 , __snake_case : Any=1e-1_2 , __snake_case : Optional[int]=1 , __snake_case : Dict=0 , __snake_case : Optional[int]=2 , __snake_case : Any="absolute" , __snake_case : Union[str, Any]=True , __snake_case : List[str]=None , **__snake_case : Optional[int] , ):
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
lowerCamelCase :Optional[int] = vocab_size
lowerCamelCase :Dict = hidden_size
lowerCamelCase :Tuple = num_hidden_layers
lowerCamelCase :Optional[int] = num_attention_heads
lowerCamelCase :Any = hidden_act
lowerCamelCase :List[Any] = intermediate_size
lowerCamelCase :Union[str, Any] = hidden_dropout_prob
lowerCamelCase :str = attention_probs_dropout_prob
lowerCamelCase :Tuple = max_position_embeddings
lowerCamelCase :int = type_vocab_size
lowerCamelCase :Optional[Any] = initializer_range
lowerCamelCase :Union[str, Any] = layer_norm_eps
lowerCamelCase :Dict = position_embedding_type
lowerCamelCase :List[Any] = use_cache
lowerCamelCase :Optional[int] = classifier_dropout
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
@property
def snake_case ( self : Any ):
if self.task == "multiple-choice":
lowerCamelCase :Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCamelCase :List[str] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 49
| 0
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
A__ = 16
A__ = 32
def _lowerCamelCase ( a_ : Accelerator , a_ : int = 16 , a_ : str = "bert-base-cased"):
lowerCamelCase :Union[str, Any] = AutoTokenizer.from_pretrained(a_)
lowerCamelCase :Optional[int] = load_dataset('''glue''' , '''mrpc''')
def tokenize_function(a_ : Optional[Any]):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase :Optional[Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=a_ , max_length=a_)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCamelCase :List[str] = datasets.map(
a_ , batched=a_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=a_)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase :str = tokenized_datasets.rename_column('''label''' , '''labels''')
def collate_fn(a_ : str):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(a_ , padding='''max_length''' , max_length=1_28 , return_tensors='''pt''')
return tokenizer.pad(a_ , padding='''longest''' , return_tensors='''pt''')
# Instantiate dataloaders.
lowerCamelCase :Tuple = DataLoader(
tokenized_datasets['''train'''] , shuffle=a_ , collate_fn=a_ , batch_size=a_)
lowerCamelCase :Any = DataLoader(
tokenized_datasets['''validation'''] , shuffle=a_ , collate_fn=a_ , batch_size=a_)
return train_dataloader, eval_dataloader
def _lowerCamelCase ( a_ : Optional[int] , a_ : Optional[int] , a_ : Union[str, Any] , a_ : Dict):
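# Evaluation loop: gather predictions across processes and drop the duplicated samples from the last batch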
model.eval()
lowerCamelCase :Any = 0
for step, batch in enumerate(a_):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
lowerCamelCase :List[str] = model(**a_)
lowerCamelCase :Optional[int] = outputs.logits.argmax(dim=-1)
# It is slightly faster to call this once than multiple times
lowerCamelCase :Any = accelerator.gather(
(predictions, batch['''labels'''])) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(a_) - 1:
lowerCamelCase :List[str] = predictions[: len(eval_dataloader.dataset) - samples_seen]
lowerCamelCase :str = references[: len(eval_dataloader.dataset) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=a_ , references=a_ , )
lowerCamelCase :Union[str, Any] = metric.compute()
return eval_metric["accuracy"]
def _lowerCamelCase ( a_ : Tuple , a_ : int):
# Initialize accelerator
lowerCamelCase :Union[str, Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase :Tuple = config['''lr''']
lowerCamelCase :Optional[int] = int(config['''num_epochs'''])
lowerCamelCase :int = int(config['''seed'''])
lowerCamelCase :int = int(config['''batch_size'''])
lowerCamelCase :Union[str, Any] = args.model_name_or_path
set_seed(a_)
lowerCamelCase :Union[str, Any] = get_dataloaders(a_ , a_ , a_)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase :List[Any] = AutoModelForSequenceClassification.from_pretrained(a_ , return_dict=a_)
# Instantiate optimizer
lowerCamelCase :Tuple = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCamelCase :int = optimizer_cls(params=model.parameters() , lr=a_)
if accelerator.state.deepspeed_plugin is not None:
lowerCamelCase :str = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
lowerCamelCase :Tuple = 1
lowerCamelCase :List[Any] = (len(a_) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCamelCase :List[str] = get_linear_schedule_with_warmup(
optimizer=a_ , num_warmup_steps=0 , num_training_steps=a_ , )
else:
lowerCamelCase :List[str] = DummyScheduler(a_ , total_num_steps=a_ , warmup_num_steps=0)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase :str = accelerator.prepare(
a_ , a_ , a_ , a_ , a_)
# We need to keep track of how many total steps we have iterated over
lowerCamelCase :List[Any] = 0
# We also need to keep track of the starting epoch so files are named properly
lowerCamelCase :int = 0
lowerCamelCase :List[Any] = evaluate.load('''glue''' , '''mrpc''')
lowerCamelCase :Any = num_epochs
if args.partial_train_epoch is not None:
lowerCamelCase :int = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint)
lowerCamelCase :Tuple = args.resume_from_checkpoint.split('''epoch_''')[1]
lowerCamelCase :str = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
lowerCamelCase :Optional[Any] = int(a_) + 1
lowerCamelCase :Optional[int] = evaluation_loop(a_ , a_ , a_ , a_)
accelerator.print('''resumed checkpoint performance:''' , a_)
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0])
accelerator.print('''resumed optimizers\'s lr:''' , optimizer.param_groups[0]['''lr'''])
with open(os.path.join(args.output_dir , F"state_{starting_epoch-1}.json") , '''r''') as f:
lowerCamelCase :Any = json.load(a_)
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
lowerCamelCase :Optional[int] = {}
for epoch in range(a_ , a_):
model.train()
for step, batch in enumerate(a_):
lowerCamelCase :List[str] = model(**a_)
lowerCamelCase :int = outputs.loss
lowerCamelCase :int = loss / gradient_accumulation_steps
accelerator.backward(a_)
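# Only step the optimizer and scheduler every gradient_accumulation_steps batches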
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
lowerCamelCase :List[Any] = F"epoch_{epoch}"
lowerCamelCase :Tuple = os.path.join(args.output_dir , a_)
accelerator.save_state(a_)
lowerCamelCase :Tuple = evaluation_loop(a_ , a_ , a_ , a_)
lowerCamelCase :int = accuracy
lowerCamelCase :Union[str, Any] = lr_scheduler.get_lr()[0]
lowerCamelCase :Any = optimizer.param_groups[0]['''lr''']
lowerCamelCase :List[Any] = epoch
lowerCamelCase :Optional[Any] = overall_step
accelerator.print(F"epoch {epoch}:" , a_)
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F"state_{epoch}.json") , '''w''') as f:
json.dump(a_ , a_)
def _lowerCamelCase ( ):
lowerCamelCase :List[str] = argparse.ArgumentParser(description='''Simple example of a training script with checkpointing and resuming.''')
parser.add_argument(
'''--model_name_or_path''' , type=a_ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=a_ , )
parser.add_argument(
'''--output_dir''' , type=a_ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=a_ , default=a_ , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--partial_train_epoch''' , type=a_ , default=a_ , help='''If passed, the training will stop after this number of epochs.''' , )
parser.add_argument(
'''--num_epochs''' , type=a_ , default=2 , help='''Number of train epochs.''' , )
lowerCamelCase :str = parser.parse_args()
lowerCamelCase :Tuple = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(a_ , a_)
if __name__ == "__main__":
main()
| 700
|
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = DebertaTokenizer
_UpperCAmelCase = True
_UpperCAmelCase = DebertaTokenizerFast
def snake_case ( self : List[str] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase :Dict = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
lowerCamelCase :List[str] = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
lowerCamelCase :Any = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCamelCase :Dict = {'''unk_token''': '''[UNK]'''}
lowerCamelCase :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__snake_case ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__snake_case ) )
def snake_case ( self : str , **__snake_case : Dict ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Optional[Any] , __snake_case : int ):
lowerCamelCase :List[Any] = '''lower newer'''
lowerCamelCase :List[str] = '''lower newer'''
return input_text, output_text
def snake_case ( self : str ):
lowerCamelCase :Optional[int] = self.get_tokenizer()
lowerCamelCase :Union[str, Any] = '''lower newer'''
lowerCamelCase :str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
lowerCamelCase :Optional[int] = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
lowerCamelCase :List[str] = tokens + [tokenizer.unk_token]
lowerCamelCase :Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def snake_case ( self : Optional[int] ):
lowerCamelCase :List[str] = self.get_tokenizer()
lowerCamelCase :Optional[int] = tokenizer('''Hello''' , '''World''' )
lowerCamelCase :List[str] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['''token_type_ids'''] , __snake_case )
@slow
def snake_case ( self : str ):
lowerCamelCase :Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
lowerCamelCase :Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__snake_case )
lowerCamelCase :Tuple = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__snake_case )
lowerCamelCase :Union[str, Any] = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
lowerCamelCase :str = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
lowerCamelCase :Any = tokenizer.build_inputs_with_special_tokens(__snake_case )
lowerCamelCase :Dict = tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def snake_case ( self : str ):
lowerCamelCase :List[str] = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
lowerCamelCase :int = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
lowerCamelCase :Tuple = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
lowerCamelCase :List[Any] = tokenizer(__snake_case , padding=__snake_case )
lowerCamelCase :Union[str, Any] = [tokenizer.decode(__snake_case , skip_special_tokens=__snake_case ) for seq in encoding['''input_ids''']]
# fmt: off
lowerCamelCase :Any = {
'''input_ids''': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
lowerCamelCase :Optional[int] = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , __snake_case )
for expected, decoded in zip(__snake_case , __snake_case ):
self.assertEqual(__snake_case , __snake_case )
| 49
| 0
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def _lowerCamelCase ( a_ : Union[str, Any]):
lowerCamelCase :Dict = 3_84
if "tiny" in model_name:
lowerCamelCase :Optional[int] = [3, 3, 9, 3]
lowerCamelCase :int = [96, 1_92, 3_84, 7_68]
if "small" in model_name:
lowerCamelCase :Any = [3, 3, 27, 3]
lowerCamelCase :Any = [96, 1_92, 3_84, 7_68]
if "base" in model_name:
lowerCamelCase :List[str] = [3, 3, 27, 3]
lowerCamelCase :Optional[Any] = [1_28, 2_56, 5_12, 10_24]
lowerCamelCase :Any = 5_12
if "large" in model_name:
lowerCamelCase :int = [3, 3, 27, 3]
lowerCamelCase :Any = [1_92, 3_84, 7_68, 15_36]
lowerCamelCase :int = 7_68
if "xlarge" in model_name:
lowerCamelCase :int = [3, 3, 27, 3]
lowerCamelCase :Union[str, Any] = [2_56, 5_12, 10_24, 20_48]
lowerCamelCase :Any = 10_24
# set label information
lowerCamelCase :Optional[Any] = 1_50
lowerCamelCase :str = '''huggingface/label-files'''
lowerCamelCase :List[str] = '''ade20k-id2label.json'''
lowerCamelCase :Any = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''') , '''r'''))
    lowerCamelCase :Optional[Any] = {int(k): v for k, v in idalabel.items()}
lowerCamelCase :Union[str, Any] = {v: k for k, v in idalabel.items()}
lowerCamelCase :List[Any] = ConvNextConfig(
depths=a_ , hidden_sizes=a_ , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''])
lowerCamelCase :List[Any] = UperNetConfig(
        backbone_config=a_ , auxiliary_in_channels=a_ , num_labels=a_ , id2label=a_ , label2id=a_ , )
return config
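# Quick sanity check of the mapping above (illustrative, not part of the conversion):
#   "upernet-convnext-tiny"  -> depths [3, 3, 9, 3],  hidden sizes [96, 192, 384, 768]
#   "upernet-convnext-large" -> depths [3, 3, 27, 3], hidden sizes [192, 384, 768, 1536]
# Every variant gets the same 150 ADE20K labels and an UperNet decode head.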
def _lowerCamelCase ( a_ : List[Any]):
lowerCamelCase :str = []
# fmt: off
# stem
rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight'''))
rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias'''))
rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight'''))
rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias'''))
# stages
for i in range(len(config.backbone_config.depths)):
for j in range(config.backbone_config.depths[i]):
rename_keys.append((F"backbone.stages.{i}.{j}.gamma", F"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter"))
rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.weight", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight"))
rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.bias", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias"))
rename_keys.append((F"backbone.stages.{i}.{j}.norm.weight", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight"))
rename_keys.append((F"backbone.stages.{i}.{j}.norm.bias", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias"))
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight"))
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias"))
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight"))
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias"))
if i > 0:
rename_keys.append((F"backbone.downsample_layers.{i}.0.weight", F"backbone.encoder.stages.{i}.downsampling_layer.0.weight"))
rename_keys.append((F"backbone.downsample_layers.{i}.0.bias", F"backbone.encoder.stages.{i}.downsampling_layer.0.bias"))
rename_keys.append((F"backbone.downsample_layers.{i}.1.weight", F"backbone.encoder.stages.{i}.downsampling_layer.1.weight"))
rename_keys.append((F"backbone.downsample_layers.{i}.1.bias", F"backbone.encoder.stages.{i}.downsampling_layer.1.bias"))
rename_keys.append((F"backbone.norm{i}.weight", F"backbone.hidden_states_norms.stage{i+1}.weight"))
rename_keys.append((F"backbone.norm{i}.bias", F"backbone.hidden_states_norms.stage{i+1}.bias"))
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
])
# fmt: on
return rename_keys
def _lowerCamelCase ( a_ : int , a_ : List[str] , a_ : int):
lowerCamelCase :List[Any] = dct.pop(a_)
lowerCamelCase :List[str] = val
def _lowerCamelCase ( a_ : Union[str, Any] , a_ : Optional[Any] , a_ : int):
lowerCamelCase :Optional[int] = {
'''upernet-convnext-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth''',
'''upernet-convnext-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth''',
'''upernet-convnext-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth''',
'''upernet-convnext-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth''',
'''upernet-convnext-xlarge''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth''',
}
lowerCamelCase :List[Any] = model_name_to_url[model_name]
lowerCamelCase :Any = torch.hub.load_state_dict_from_url(a_ , map_location='''cpu''')['''state_dict''']
lowerCamelCase :Tuple = get_upernet_config(a_)
lowerCamelCase :Optional[int] = UperNetForSemanticSegmentation(a_)
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
lowerCamelCase :List[Any] = state_dict.pop(a_)
if "bn" in key:
lowerCamelCase :List[Any] = key.replace('''bn''' , '''batch_norm''')
lowerCamelCase :Optional[int] = val
# rename keys
lowerCamelCase :str = create_rename_keys(a_)
for src, dest in rename_keys:
rename_key(a_ , a_ , a_)
model.load_state_dict(a_)
# verify on image
lowerCamelCase :Tuple = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
lowerCamelCase :Tuple = Image.open(requests.get(a_ , stream=a_).raw).convert('''RGB''')
lowerCamelCase :List[str] = SegformerImageProcessor()
lowerCamelCase :Any = processor(a_ , return_tensors='''pt''').pixel_values
with torch.no_grad():
lowerCamelCase :Any = model(a_)
if model_name == "upernet-convnext-tiny":
lowerCamelCase :Optional[Any] = torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]])
elif model_name == "upernet-convnext-small":
lowerCamelCase :Any = torch.tensor(
[[-8.8_236, -8.8_236, -8.6_771], [-8.8_236, -8.8_236, -8.6_771], [-8.7_638, -8.7_638, -8.6_240]])
elif model_name == "upernet-convnext-base":
lowerCamelCase :Any = torch.tensor(
[[-8.8_558, -8.8_558, -8.6_905], [-8.8_558, -8.8_558, -8.6_905], [-8.7_669, -8.7_669, -8.6_021]])
elif model_name == "upernet-convnext-large":
lowerCamelCase :List[Any] = torch.tensor(
[[-8.6_660, -8.6_660, -8.6_210], [-8.6_660, -8.6_660, -8.6_210], [-8.6_310, -8.6_310, -8.5_964]])
elif model_name == "upernet-convnext-xlarge":
lowerCamelCase :Dict = torch.tensor(
[[-8.4_980, -8.4_980, -8.3_977], [-8.4_980, -8.4_980, -8.3_977], [-8.4_379, -8.4_379, -8.3_412]])
print('''Logits:''' , outputs.logits[0, 0, :3, :3])
assert torch.allclose(outputs.logits[0, 0, :3, :3] , a_ , atol=1e-4)
print('''Looks ok!''')
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}")
model.save_pretrained(a_)
print(F"Saving processor to {pytorch_dump_folder_path}")
processor.save_pretrained(a_)
if push_to_hub:
print(F"Pushing model and processor for {model_name} to hub")
model.push_to_hub(F"openmmlab/{model_name}")
processor.push_to_hub(F"openmmlab/{model_name}")
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-convnext-tiny""",
type=str,
choices=[F'upernet-convnext-{size}' for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]],
help="""Name of the ConvNext UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
A__ = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
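# Typical invocation (hypothetical script name and paths; flags as defined above):
#   python convert_upernet_checkpoint.py --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny --push_to_hub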
| 701
|
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
A__ = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Any , __snake_case : str , __snake_case : bool , __snake_case : str = None , __snake_case : list = None ):
lowerCamelCase :Tuple = None
lowerCamelCase :Tuple = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
lowerCamelCase :Optional[int] = os.path.abspath('''examples''' )
for item in os.listdir(__snake_case ):
if item not in EXCLUDE_EXAMPLES:
lowerCamelCase :Optional[int] = os.path.join(__snake_case , __snake_case )
if os.path.isfile(__snake_case ) and ".py" in item_path:
with self.subTest(
tested_script=__snake_case , feature_script=__snake_case , tested_section='''main()''' if parser_only else '''training_function()''' , ):
lowerCamelCase :Union[str, Any] = compare_against_test(
os.path.join(__snake_case , __snake_case ) , __snake_case , __snake_case , __snake_case )
lowerCamelCase :int = '''\n'''.join(__snake_case )
if special_strings is not None:
for string in special_strings:
lowerCamelCase :int = diff.replace(__snake_case , '''''' )
self.assertEqual(__snake_case , '''''' )
def snake_case ( self : Dict ):
self.one_complete_example('''complete_nlp_example.py''' , __snake_case )
self.one_complete_example('''complete_nlp_example.py''' , __snake_case )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Union[str, Any] = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
lowerCamelCase :Optional[int] = [
''' ''' * 16 + '''{\n\n''',
''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 20 + '''"epoch": epoch,\n\n''',
''' ''' * 16 + '''},\n\n''',
''' ''' * 16 + '''step=epoch,\n''',
''' ''' * 12,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , __snake_case , __snake_case , __snake_case )
self.one_complete_example('''complete_cv_example.py''' , __snake_case , __snake_case , __snake_case )
@mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} )
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = False
@classmethod
def snake_case ( cls : Optional[Any] ):
super().setUpClass()
lowerCamelCase :Any = tempfile.mkdtemp()
lowerCamelCase :Optional[int] = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
lowerCamelCase :List[str] = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def snake_case ( cls : Dict ):
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def snake_case ( self : int ):
lowerCamelCase :Any = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def snake_case ( self : List[Any] ):
lowerCamelCase :Tuple = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n ".split()
lowerCamelCase :List[Any] = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def snake_case ( self : List[str] ):
lowerCamelCase :Dict = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}\n ".split()
lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case )
self.assertNotIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
def snake_case ( self : str ):
lowerCamelCase :List[Any] = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}\n ".split()
lowerCamelCase :Optional[int] = run_command(self._launch_args + testargs , return_stdout=__snake_case )
if torch.cuda.is_available():
lowerCamelCase :Union[str, Any] = torch.cuda.device_count()
else:
lowerCamelCase :Dict = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
else:
self.assertIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
@slow
def snake_case ( self : Any ):
lowerCamelCase :Tuple = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case )
lowerCamelCase :Tuple = re.findall('''({.+})''' , __snake_case )
lowerCamelCase :Optional[Any] = [r for r in results if '''accuracy''' in r][-1]
lowerCamelCase :List[str] = ast.literal_eval(__snake_case )
self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 )
def snake_case ( self : int ):
lowerCamelCase :Dict = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case ( self : Any ):
with tempfile.TemporaryDirectory() as tmpdir:
lowerCamelCase :Tuple = F"\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__snake_case , '''tracking''' ) ) )
def snake_case ( self : Tuple ):
lowerCamelCase :Tuple = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :int = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
| 49
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 702
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
A__ = imread(R"""digital_image_processing/image_data/lena_small.jpg""")
A__ = cvtColor(img, COLOR_BGR2GRAY)
def _lowerCamelCase ( ):
lowerCamelCase :int = cn.convert_to_negative(a_)
# assert negative_img array for at least one True
assert negative_img.any()
def _lowerCamelCase ( ):
with Image.open('''digital_image_processing/image_data/lena_small.jpg''') as img:
# Work around assertion for response
assert str(cc.change_contrast(a_ , 1_10)).startswith(
'''<PIL.Image.Image image mode=RGB size=100x100 at''')
def _lowerCamelCase ( ):
lowerCamelCase :Optional[Any] = canny.gen_gaussian_kernel(9 , sigma=1.4)
# Assert ambiguous array
assert resp.all()
def _lowerCamelCase ( ):
lowerCamelCase :str = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0)
# assert ambiguous array for all == True
assert canny_img.all()
lowerCamelCase :Optional[Any] = canny.canny(a_)
# assert canny array for at least one True
assert canny_array.any()
def _lowerCamelCase ( ):
assert gg.gaussian_filter(a_ , 5 , sigma=0.9).all()
def _lowerCamelCase ( ):
# laplace diagonals
lowerCamelCase :List[Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
lowerCamelCase :List[Any] = conv.img_convolve(a_ , a_).astype(a_)
assert res.any()
def _lowerCamelCase ( ):
assert med.median_filter(a_ , 3).any()
def _lowerCamelCase ( ):
lowerCamelCase , lowerCamelCase :Union[str, Any] = sob.sobel_filter(a_)
assert grad.any() and theta.any()
def _lowerCamelCase ( ):
lowerCamelCase :Dict = sp.make_sepia(a_ , 20)
assert sepia.all()
def _lowerCamelCase ( a_ : str = "digital_image_processing/image_data/lena_small.jpg"):
lowerCamelCase :Any = bs.Burkes(imread(a_ , 1) , 1_20)
burkes.process()
assert burkes.output_img.any()
def _lowerCamelCase ( a_ : str = "digital_image_processing/image_data/lena_small.jpg" , ):
lowerCamelCase :Tuple = rs.NearestNeighbour(imread(a_ , 1) , 4_00 , 2_00)
nn.process()
assert nn.output.any()
def _lowerCamelCase ( ):
lowerCamelCase :Tuple = '''digital_image_processing/image_data/lena.jpg'''
# Reading the image and converting it to grayscale.
lowerCamelCase :Tuple = imread(a_ , 0)
# Test for get_neighbors_pixel function() return not None
lowerCamelCase :Dict = 0
lowerCamelCase :Optional[Any] = 0
lowerCamelCase :str = image[x_coordinate][y_coordinate]
lowerCamelCase :Any = lbp.get_neighbors_pixel(
a_ , a_ , a_ , a_)
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
lowerCamelCase :int = np.zeros((image.shape[0], image.shape[1]))
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0]):
for j in range(0 , image.shape[1]):
lowerCamelCase :Optional[int] = lbp.local_binary_value(a_ , a_ , a_)
assert lbp_image.any()
| 49
| 0
|
import heapq
import sys
import numpy as np
A__ = tuple[int, int]
class _lowerCAmelCase :
def __init__( self : str ):
lowerCamelCase :int = []
lowerCamelCase :List[str] = set()
def snake_case ( self : List[str] ):
if not self.empty():
return self.elements[0][0]
else:
return float('''inf''' )
def snake_case ( self : int ):
return len(self.elements ) == 0
def snake_case ( self : str , __snake_case : Union[str, Any] , __snake_case : Any ):
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(__snake_case )
else:
# update
# print("update", item)
lowerCamelCase :Union[str, Any] = []
            lowerCamelCase , lowerCamelCase :str = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
                lowerCamelCase , lowerCamelCase :Tuple = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def snake_case ( self : List[str] , __snake_case : str ):
if item in self.set:
self.set.remove(__snake_case )
lowerCamelCase :Any = []
            lowerCamelCase , lowerCamelCase :Optional[Any] = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
                lowerCamelCase , lowerCamelCase :int = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def snake_case ( self : str ):
return self.elements[0][1]
def snake_case ( self : List[str] ):
        lowerCamelCase , lowerCamelCase :str = heapq.heappop(self.elements )
self.set.remove(__snake_case )
return (priority, item)
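# Note on the queue above: `put` on an already-queued item pops entries until it
# finds the item, then pushes everything back with the new priority; `remove_element`
# rebuilds the heap the same way. Both are O(n log n) per call, which is fine for
# the 20x20 grid searched below.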
def consistent_heuristic( a_ : TPos , a_ : TPos):
# euclidean distance
lowerCamelCase :Optional[int] = np.array(a_)
lowerCamelCase :Union[str, Any] = np.array(a_)
return np.linalg.norm(a - b)
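# Sanity check: for points (0, 0) and (3, 4) the Euclidean distance is 5.0.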
def heuristic_1( a_ : TPos , a_ : TPos):
# integer division by time variable
return consistent_heuristic(a_ , a_) // t
def heuristic_2( a_ : TPos , a_ : TPos):
# manhattan distance
return abs(p[0] - goal[0]) + abs(p[1] - goal[1])
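# Sanity check: for p = (0, 0) and goal = (3, 4) the Manhattan distance is 3 + 4 = 7.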
def key( a_ : TPos , a_ : int , a_ : TPos , a_ : dict[TPos, float]):
lowerCamelCase :Optional[int] = g_function[start] + Wa * heuristics[i](a_ , a_)
return ans
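# key() is the weighted f-value g(start) + Wa * h_i(start, goal): the anchor queue
# uses the consistent heuristic (i = 0), the other queues use the inadmissible ones.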
def do_something( a_ : Optional[Any] , a_ : Tuple , a_ : str):
lowerCamelCase :List[str] = np.chararray((n, n))
for i in range(a_):
for j in range(a_):
lowerCamelCase :Dict = '''*'''
for i in range(a_):
for j in range(a_):
if (j, (n - 1) - i) in blocks:
lowerCamelCase :str = '''#'''
lowerCamelCase :Tuple = '''-'''
lowerCamelCase :Optional[int] = back_pointer[goal]
while x != start:
        lowerCamelCase , lowerCamelCase :List[str] = x
# print(x)
lowerCamelCase :int = '''-'''
lowerCamelCase :Dict = back_pointer[x]
lowerCamelCase :Optional[Any] = '''-'''
for i in range(a_):
for j in range(a_):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=''' ''')
print('''<-- End position''' , end=''' ''')
else:
print(grid[i][j] , end=''' ''')
print()
print('''^''')
print('''Start position''')
print()
print('''# is an obstacle''')
print('''- is the path taken by algorithm''')
print('''PATH TAKEN BY THE ALGORITHM IS:-''')
lowerCamelCase :str = back_pointer[goal]
while x != start:
print(a_ , end=''' ''')
lowerCamelCase :List[str] = back_pointer[x]
print(a_)
sys.exit()
def valid( a_ : TPos):
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
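# With n = 20, valid((0, 0)) and valid((19, 19)) hold, while valid((-1, 5)) and
# valid((5, 20)) do not, so neighbours generated off the grid are skipped.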
def expand_state( a_ : Any , a_ : Optional[int] , a_ : Optional[int] , a_ : Dict , a_ : Dict , a_ : Tuple , a_ : str , a_ : Any , ):
for itera in range(a_):
open_list[itera].remove_element(a_)
# print("s", s)
# print("j", j)
    lowerCamelCase , lowerCamelCase :List[str] = s
lowerCamelCase :Tuple = (x - 1, y)
lowerCamelCase :Union[str, Any] = (x + 1, y)
lowerCamelCase :Optional[Any] = (x, y + 1)
lowerCamelCase :int = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(a_) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(a_)
lowerCamelCase :Optional[int] = -1
lowerCamelCase :List[Any] = float('''inf''')
if valid(a_) and g_function[neighbours] > g_function[s] + 1:
lowerCamelCase :Dict = g_function[s] + 1
lowerCamelCase :Any = s
if neighbours not in close_list_anchor:
open_list[0].put(a_ , key(a_ , 0 , a_ , a_))
if neighbours not in close_list_inad:
for var in range(1 , a_):
if key(a_ , a_ , a_ , a_) <= Wa * key(
a_ , 0 , a_ , a_):
open_list[j].put(
a_ , key(a_ , a_ , a_ , a_))
def make_common_ground( ):
lowerCamelCase :int = []
for x in range(1 , 5):
for y in range(1 , 6):
some_list.append((x, y))
for x in range(15 , 20):
some_list.append((x, 17))
for x in range(10 , 19):
for y in range(1 , 15):
some_list.append((x, y))
# L block
for x in range(1 , 4):
for y in range(12 , 19):
some_list.append((x, y))
for x in range(3 , 13):
for y in range(16 , 19):
some_list.append((x, y))
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}
blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()
blocks = blocks_blk
# hyper parameters
Wa = 1
Wa = 1  # W1 and W2 of the original algorithm collapse to a single name here
n = 20
n_heuristic = 3 # one consistent and two other inconsistent
# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)
t = 1
def multi_a_star( a_ : TPos , a_ : TPos , a_ : int):
lowerCamelCase :Dict = {start: 0, goal: float('''inf''')}
lowerCamelCase :Optional[Any] = {start: -1, goal: -1}
lowerCamelCase :Optional[Any] = []
lowerCamelCase :Any = set()
for i in range(a_):
open_list.append(PriorityQueue())
open_list[i].put(a_ , key(a_ , a_ , a_ , a_))
lowerCamelCase :list[int] = []
lowerCamelCase :list[int] = []
while open_list[0].minkey() < float('''inf'''):
for i in range(1 , a_):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('''inf'''):
do_something(a_ , a_ , a_)
else:
lowerCamelCase :Optional[int] = open_list[i].top_show()
visited.add(a_)
expand_state(
a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , )
close_list_inad.append(a_)
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('''inf'''):
do_something(a_ , a_ , a_)
else:
lowerCamelCase :List[Any] = open_list[0].top_show()
visited.add(a_)
expand_state(
a_ , 0 , a_ , a_ , a_ , a_ , a_ , a_ , )
close_list_anchor.append(a_)
print('''No path found to goal''')
print()
for i in range(n - 1 , -1 , -1):
for j in range(a_):
if (j, i) in blocks:
print('''#''' , end=''' ''')
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''')
else:
print('''-''' , end=''' ''')
else:
print('''*''' , end=''' ''')
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''')
print()
print('''^''')
print('''Start position''')
print()
print('''# is an obstacle''')
print('''- is the path taken by algorithm''')
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 703
|
import os
from math import logaa
def solution( a_ : str = "base_exp.txt"):
lowerCamelCase :float = 0
lowerCamelCase :Optional[int] = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(a_) , a_))):
lowerCamelCase , lowerCamelCase :Optional[int] = list(map(a_ , line.split(''',''')))
if x * logaa(a_) > largest:
lowerCamelCase :List[Any] = x * logaa(a_)
lowerCamelCase :Any = i + 1
return result
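# The log trick above: base**exp exceeds the running maximum exactly when
# exp * log10(base) is larger, so the huge powers never need to be materialised.
# For example, comparing 2**100 with 3**60: 100 * log10(2) ~ 30.1 beats
# 60 * log10(3) ~ 28.6, so 2**100 is the larger number.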
if __name__ == "__main__":
print(solution())
| 49
| 0
|
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class _lowerCAmelCase :
def __init__( self : Any ):
lowerCamelCase :Union[str, Any] = {}
def snake_case ( self : Any , __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : str=1 ):
if self.graph.get(__snake_case ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
lowerCamelCase :Dict = [[w, v]]
if not self.graph.get(__snake_case ):
lowerCamelCase :List[Any] = []
def snake_case ( self : int ):
return list(self.graph )
def snake_case ( self : Union[str, Any] , __snake_case : List[Any] , __snake_case : Optional[Any] ):
if self.graph.get(__snake_case ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__snake_case )
def snake_case ( self : Optional[int] , __snake_case : Tuple=-2 , __snake_case : int=-1 ):
if s == d:
return []
lowerCamelCase :int = []
lowerCamelCase :Tuple = []
if s == -2:
lowerCamelCase :str = list(self.graph )[0]
stack.append(__snake_case )
visited.append(__snake_case )
lowerCamelCase :Dict = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase :Tuple = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__snake_case )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase :int = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__snake_case ) != 0:
lowerCamelCase :List[str] = stack[len(__snake_case ) - 1]
else:
lowerCamelCase :Optional[Any] = ss
# check if se have reached the starting point
if len(__snake_case ) == 0:
return visited
def snake_case ( self : Union[str, Any] , __snake_case : List[Any]=-1 ):
if c == -1:
lowerCamelCase :str = floor(random() * 10000 ) + 10
for i in range(__snake_case ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
lowerCamelCase :Dict = floor(random() * c ) + 1
if n != i:
self.add_pair(__snake_case , __snake_case , 1 )
def snake_case ( self : Optional[Any] , __snake_case : str=-2 ):
lowerCamelCase :List[str] = deque()
lowerCamelCase :List[str] = []
if s == -2:
lowerCamelCase :Any = list(self.graph )[0]
d.append(__snake_case )
visited.append(__snake_case )
while d:
lowerCamelCase :Optional[int] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def snake_case ( self : str , __snake_case : str ):
lowerCamelCase :Union[str, Any] = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def snake_case ( self : int , __snake_case : List[str] ):
return len(self.graph[u] )
def snake_case ( self : str , __snake_case : List[Any]=-2 ):
lowerCamelCase :List[Any] = []
lowerCamelCase :Dict = []
if s == -2:
lowerCamelCase :Union[str, Any] = list(self.graph )[0]
stack.append(__snake_case )
visited.append(__snake_case )
lowerCamelCase :Optional[int] = s
lowerCamelCase :List[Any] = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase :Dict = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase :Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(__snake_case ) != 0:
lowerCamelCase :Optional[int] = stack[len(__snake_case ) - 1]
else:
lowerCamelCase :Union[str, Any] = ss
# check if se have reached the starting point
if len(__snake_case ) == 0:
return sorted_nodes
def snake_case ( self : int ):
lowerCamelCase :Any = []
lowerCamelCase :Optional[int] = []
lowerCamelCase :Any = list(self.graph )[0]
stack.append(__snake_case )
visited.append(__snake_case )
lowerCamelCase :int = -2
lowerCamelCase :Union[str, Any] = []
lowerCamelCase :str = s
lowerCamelCase :str = False
lowerCamelCase :int = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase :Dict = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase :str = len(__snake_case ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase :Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase :Tuple = True
if len(__snake_case ) != 0:
lowerCamelCase :int = stack[len(__snake_case ) - 1]
else:
lowerCamelCase :Tuple = False
indirect_parents.append(__snake_case )
lowerCamelCase :Tuple = s
lowerCamelCase :str = ss
# check if se have reached the starting point
if len(__snake_case ) == 0:
return list(__snake_case )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Union[str, Any] = []
lowerCamelCase :str = []
lowerCamelCase :List[str] = list(self.graph )[0]
stack.append(__snake_case )
visited.append(__snake_case )
lowerCamelCase :List[str] = -2
lowerCamelCase :Optional[Any] = []
lowerCamelCase :Optional[Any] = s
lowerCamelCase :Tuple = False
lowerCamelCase :Optional[Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase :str = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase :List[Any] = len(__snake_case ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase :Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase :Tuple = True
if len(__snake_case ) != 0:
lowerCamelCase :int = stack[len(__snake_case ) - 1]
else:
lowerCamelCase :Tuple = False
indirect_parents.append(__snake_case )
lowerCamelCase :int = s
lowerCamelCase :Tuple = ss
# check if se have reached the starting point
if len(__snake_case ) == 0:
return False
def snake_case ( self : List[Any] , __snake_case : List[str]=-2 , __snake_case : Optional[Any]=-1 ):
lowerCamelCase :Union[str, Any] = time()
self.dfs(__snake_case , __snake_case )
lowerCamelCase :Optional[Any] = time()
return end - begin
def snake_case ( self : int , __snake_case : int=-2 ):
lowerCamelCase :List[Any] = time()
self.bfs(__snake_case )
lowerCamelCase :Tuple = time()
return end - begin
class _lowerCAmelCase :
def __init__( self : Union[str, Any] ):
lowerCamelCase :Any = {}
def snake_case ( self : Union[str, Any] , __snake_case : int , __snake_case : Dict , __snake_case : List[str]=1 ):
# check if the u exists
if self.graph.get(__snake_case ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
lowerCamelCase :int = [[w, v]]
# add the other way
if self.graph.get(__snake_case ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if u does not exist
lowerCamelCase :str = [[w, u]]
def snake_case ( self : Tuple , __snake_case : Optional[int] , __snake_case : Tuple ):
if self.graph.get(__snake_case ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__snake_case )
# the other way round
if self.graph.get(__snake_case ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(__snake_case )
def snake_case ( self : int , __snake_case : int=-2 , __snake_case : List[Any]=-1 ):
if s == d:
return []
lowerCamelCase :int = []
lowerCamelCase :str = []
if s == -2:
lowerCamelCase :Dict = list(self.graph )[0]
stack.append(__snake_case )
visited.append(__snake_case )
lowerCamelCase :Optional[int] = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase :List[str] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__snake_case )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase :Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__snake_case ) != 0:
lowerCamelCase :Optional[int] = stack[len(__snake_case ) - 1]
else:
lowerCamelCase :Any = ss
# check if se have reached the starting point
if len(__snake_case ) == 0:
return visited
def snake_case ( self : Dict , __snake_case : List[str]=-1 ):
if c == -1:
lowerCamelCase :str = floor(random() * 10000 ) + 10
for i in range(__snake_case ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
lowerCamelCase :Optional[int] = floor(random() * c ) + 1
if n != i:
self.add_pair(__snake_case , __snake_case , 1 )
def snake_case ( self : int , __snake_case : str=-2 ):
lowerCamelCase :Optional[int] = deque()
lowerCamelCase :List[Any] = []
if s == -2:
lowerCamelCase :Union[str, Any] = list(self.graph )[0]
d.append(__snake_case )
visited.append(__snake_case )
while d:
lowerCamelCase :str = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def snake_case ( self : int , __snake_case : Any ):
return len(self.graph[u] )
def snake_case ( self : Tuple ):
lowerCamelCase :Optional[int] = []
lowerCamelCase :Optional[Any] = []
lowerCamelCase :Any = list(self.graph )[0]
stack.append(__snake_case )
visited.append(__snake_case )
lowerCamelCase :Optional[Any] = -2
lowerCamelCase :Optional[int] = []
lowerCamelCase :int = s
lowerCamelCase :str = False
lowerCamelCase :List[Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase :List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase :Tuple = len(__snake_case ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase :Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase :str = True
if len(__snake_case ) != 0:
lowerCamelCase :Dict = stack[len(__snake_case ) - 1]
else:
lowerCamelCase :Tuple = False
indirect_parents.append(__snake_case )
lowerCamelCase :Union[str, Any] = s
lowerCamelCase :Any = ss
# check if se have reached the starting point
if len(__snake_case ) == 0:
return list(__snake_case )
def snake_case ( self : Any ):
lowerCamelCase :Optional[Any] = []
lowerCamelCase :Union[str, Any] = []
lowerCamelCase :Any = list(self.graph )[0]
stack.append(__snake_case )
visited.append(__snake_case )
lowerCamelCase :Any = -2
lowerCamelCase :Dict = []
lowerCamelCase :Dict = s
lowerCamelCase :Union[str, Any] = False
lowerCamelCase :List[Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase :Union[str, Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase :int = len(__snake_case ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase :Dict = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase :Optional[Any] = True
if len(__snake_case ) != 0:
lowerCamelCase :Any = stack[len(__snake_case ) - 1]
else:
lowerCamelCase :List[Any] = False
indirect_parents.append(__snake_case )
lowerCamelCase :Any = s
lowerCamelCase :Any = ss
# check if se have reached the starting point
if len(__snake_case ) == 0:
return False
def snake_case ( self : List[Any] ):
return list(self.graph )
def snake_case ( self : Optional[int] , __snake_case : Dict=-2 , __snake_case : int=-1 ):
lowerCamelCase :str = time()
self.dfs(__snake_case , __snake_case )
lowerCamelCase :Tuple = time()
return end - begin
def snake_case ( self : List[str] , __snake_case : Union[str, Any]=-2 ):
lowerCamelCase :List[Any] = time()
self.bfs(__snake_case )
lowerCamelCase :Tuple = time()
return end - begin
| 704
|
def _lowerCamelCase ( a_ : list):
if not grid or not grid[0]:
raise TypeError('''The grid does not contain the appropriate information''')
for cell_n in range(1 , len(grid[0])):
grid[0][cell_n] += grid[0][cell_n - 1]
lowerCamelCase :Any = grid[0]
for row_n in range(1 , len(a_)):
lowerCamelCase :List[str] = grid[row_n]
lowerCamelCase :Union[str, Any] = fill_row(a_ , a_)
lowerCamelCase :List[Any] = grid[row_n]
return grid[-1][-1]
def fill_row( a_ : list , a_ : list):
current_row[0] += row_above[0]
for cell_n in range(1 , len(a_)):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n])
return current_row
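# Worked example for the dynamic programme above: for grid [[1, 3, 1], [1, 5, 1],
# [4, 2, 1]] the cheapest top-left-to-bottom-right path is 1 -> 3 -> 1 -> 1 -> 1,
# with total cost 7.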
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"""kssteven/ibert-roberta-base""": """https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json""",
"""kssteven/ibert-roberta-large""": """https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json""",
"""kssteven/ibert-roberta-large-mnli""": (
"""https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'ibert'
def __init__( self : Tuple , __snake_case : Optional[int]=30522 , __snake_case : List[Any]=768 , __snake_case : Union[str, Any]=12 , __snake_case : Tuple=12 , __snake_case : List[str]=3072 , __snake_case : int="gelu" , __snake_case : int=0.1 , __snake_case : Tuple=0.1 , __snake_case : Optional[Any]=512 , __snake_case : Any=2 , __snake_case : Union[str, Any]=0.0_2 , __snake_case : Optional[Any]=1e-1_2 , __snake_case : List[str]=1 , __snake_case : Optional[Any]=0 , __snake_case : List[Any]=2 , __snake_case : Optional[int]="absolute" , __snake_case : Union[str, Any]=False , __snake_case : List[str]="none" , **__snake_case : Dict , ):
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
lowerCamelCase :Tuple = vocab_size
lowerCamelCase :List[str] = hidden_size
lowerCamelCase :Optional[int] = num_hidden_layers
lowerCamelCase :int = num_attention_heads
lowerCamelCase :Tuple = hidden_act
lowerCamelCase :Optional[Any] = intermediate_size
lowerCamelCase :Optional[int] = hidden_dropout_prob
lowerCamelCase :Any = attention_probs_dropout_prob
lowerCamelCase :int = max_position_embeddings
lowerCamelCase :Any = type_vocab_size
lowerCamelCase :int = initializer_range
lowerCamelCase :Optional[Any] = layer_norm_eps
lowerCamelCase :Union[str, Any] = position_embedding_type
lowerCamelCase :List[Any] = quant_mode
lowerCamelCase :Union[str, Any] = force_dequant
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
@property
def snake_case ( self : List[Any] ):
if self.task == "multiple-choice":
lowerCamelCase :List[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCamelCase :Tuple = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 705
|
import math
def is_prime( number : int):
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number) + 1) , 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
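# Sanity check: is_prime(29) is True, while is_prime(33) is False because 33 = 3 * 11.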
def _lowerCamelCase ( a_ : float = 0.1):
lowerCamelCase :Dict = 3
lowerCamelCase :List[Any] = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1):
            primes += is_prime(i)
j += 2
return j
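# This mirrors Project Euler problem 58: walk the corners of an ever-growing number
# spiral, count the primes along its diagonals, and return the first side length j
# at which the prime ratio among the 2*j - 1 diagonal values falls below `ratio`.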
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
| 0
|
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : int ):
lowerCamelCase :Optional[int] = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
lowerCamelCase :Any = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
lowerCamelCase :Optional[Any] = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
lowerCamelCase :List[Any] = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 16000,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
lowerCamelCase :int = tempfile.mkdtemp()
lowerCamelCase :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase :Tuple = os.path.join(self.tmpdirname , __snake_case )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__snake_case ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__snake_case ) + '''\n''' )
# load decoder from hub
lowerCamelCase :Dict = '''hf-internal-testing/ngram-beam-search-decoder'''
def snake_case ( self : Optional[int] , **__snake_case : Any ):
lowerCamelCase :Dict = self.add_kwargs_tokens_map.copy()
kwargs.update(__snake_case )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Optional[int] , **__snake_case : List[Any] ):
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Union[str, Any] , **__snake_case : List[Any] ):
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__snake_case )
def snake_case ( self : Any ):
shutil.rmtree(self.tmpdirname )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Dict = self.get_tokenizer()
lowerCamelCase :Tuple = self.get_feature_extractor()
lowerCamelCase :Optional[int] = self.get_decoder()
lowerCamelCase :str = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase :Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __snake_case )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __snake_case )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __snake_case )
def snake_case ( self : List[str] ):
lowerCamelCase :Dict = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
lowerCamelCase :Any = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def snake_case ( self : Optional[int] ):
lowerCamelCase :Optional[Any] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(__snake_case , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=__snake_case , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def snake_case ( self : str ):
lowerCamelCase :Optional[Any] = self.get_feature_extractor()
lowerCamelCase :List[Any] = self.get_tokenizer()
lowerCamelCase :Any = self.get_decoder()
lowerCamelCase :Any = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
lowerCamelCase :Tuple = floats_list((3, 1000) )
lowerCamelCase :Union[str, Any] = feature_extractor(__snake_case , return_tensors='''np''' )
lowerCamelCase :Optional[Any] = processor(__snake_case , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case ( self : Any ):
lowerCamelCase :Union[str, Any] = self.get_feature_extractor()
lowerCamelCase :List[Any] = self.get_tokenizer()
lowerCamelCase :Optional[Any] = self.get_decoder()
lowerCamelCase :List[str] = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
lowerCamelCase :Union[str, Any] = '''This is a test string'''
lowerCamelCase :Optional[int] = processor(text=__snake_case )
lowerCamelCase :Dict = tokenizer(__snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case ( self : Dict , __snake_case : List[str]=(2, 10, 16) , __snake_case : Tuple=77 ):
np.random.seed(__snake_case )
return np.random.rand(*__snake_case )
def snake_case ( self : Any ):
lowerCamelCase :Any = self.get_feature_extractor()
lowerCamelCase :int = self.get_tokenizer()
lowerCamelCase :int = self.get_decoder()
lowerCamelCase :int = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
lowerCamelCase :Dict = self._get_dummy_logits(shape=(10, 16) , seed=13 )
lowerCamelCase :Optional[Any] = processor.decode(__snake_case )
lowerCamelCase :Optional[int] = decoder.decode_beams(__snake_case )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def snake_case ( self : Any , __snake_case : str ):
lowerCamelCase :str = self.get_feature_extractor()
lowerCamelCase :str = self.get_tokenizer()
lowerCamelCase :Any = self.get_decoder()
lowerCamelCase :Any = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
lowerCamelCase :Tuple = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
lowerCamelCase :int = processor.batch_decode(__snake_case )
else:
with get_context(__snake_case ).Pool() as pool:
lowerCamelCase :List[str] = processor.batch_decode(__snake_case , __snake_case )
lowerCamelCase :List[Any] = list(__snake_case )
with get_context('''fork''' ).Pool() as p:
lowerCamelCase :int = decoder.decode_beams_batch(__snake_case , __snake_case )
        lowerCamelCase , lowerCamelCase , lowerCamelCase :Optional[int] = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__snake_case , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(__snake_case , decoded_processor.logit_score )
self.assertListEqual(__snake_case , decoded_processor.lm_score )
def snake_case ( self : Dict ):
lowerCamelCase :str = self.get_feature_extractor()
lowerCamelCase :int = self.get_tokenizer()
lowerCamelCase :List[Any] = self.get_decoder()
lowerCamelCase :str = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
lowerCamelCase :Dict = self._get_dummy_logits()
lowerCamelCase :Optional[int] = 15
lowerCamelCase :Optional[Any] = -20.0
lowerCamelCase :int = -4.0
lowerCamelCase :int = processor.batch_decode(
__snake_case , beam_width=__snake_case , beam_prune_logp=__snake_case , token_min_logp=__snake_case , )
lowerCamelCase :List[str] = decoded_processor_out.text
lowerCamelCase :List[str] = list(__snake_case )
with get_context('''fork''' ).Pool() as pool:
lowerCamelCase :List[Any] = decoder.decode_beams_batch(
__snake_case , __snake_case , beam_width=__snake_case , beam_prune_logp=__snake_case , token_min_logp=__snake_case , )
lowerCamelCase :Dict = [d[0][0] for d in decoded_decoder_out]
lowerCamelCase :Optional[int] = [d[0][2] for d in decoded_decoder_out]
lowerCamelCase :Tuple = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__snake_case , __snake_case )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , __snake_case )
self.assertTrue(np.array_equal(__snake_case , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __snake_case , atol=1e-3 ) )
self.assertTrue(np.array_equal(__snake_case , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , __snake_case , atol=1e-3 ) )
def snake_case ( self : Dict ):
lowerCamelCase :List[str] = self.get_feature_extractor()
lowerCamelCase :List[Any] = self.get_tokenizer()
lowerCamelCase :List[str] = self.get_decoder()
lowerCamelCase :str = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
lowerCamelCase :Dict = self._get_dummy_logits()
lowerCamelCase :str = 2.0
lowerCamelCase :Any = 5.0
lowerCamelCase :Optional[Any] = -20.0
lowerCamelCase :List[Any] = True
lowerCamelCase :List[Any] = processor.batch_decode(
__snake_case , alpha=__snake_case , beta=__snake_case , unk_score_offset=__snake_case , lm_score_boundary=__snake_case , )
lowerCamelCase :Dict = decoded_processor_out.text
lowerCamelCase :List[Any] = list(__snake_case )
decoder.reset_params(
alpha=__snake_case , beta=__snake_case , unk_score_offset=__snake_case , lm_score_boundary=__snake_case , )
with get_context('''fork''' ).Pool() as pool:
lowerCamelCase :Dict = decoder.decode_beams_batch(
__snake_case , __snake_case , )
lowerCamelCase :Optional[Any] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__snake_case , __snake_case )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , __snake_case )
lowerCamelCase :List[Any] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __snake_case )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Dict = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase :Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
lowerCamelCase :Tuple = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
lowerCamelCase :Any = os.listdir(__snake_case )
lowerCamelCase :Tuple = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__snake_case , __snake_case )
def snake_case ( self : Any ):
lowerCamelCase :Union[str, Any] = snapshot_download('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase :Tuple = WavaVecaProcessorWithLM.from_pretrained(__snake_case )
lowerCamelCase :Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
lowerCamelCase :Union[str, Any] = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
lowerCamelCase :List[Any] = os.listdir(__snake_case )
lowerCamelCase :Any = os.listdir(__snake_case )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that the decoder files from the hub and the local cached files are the same
self.assertListEqual(__snake_case , __snake_case )
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Tuple = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase :List[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase :List[Any] = floats_list((3, 1000) )
lowerCamelCase :Optional[Any] = processor_wavaveca(__snake_case , return_tensors='''np''' )
lowerCamelCase :int = processor_auto(__snake_case , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
lowerCamelCase :Union[str, Any] = self._get_dummy_logits()
lowerCamelCase :Optional[Any] = processor_wavaveca.batch_decode(__snake_case )
lowerCamelCase :Optional[Any] = processor_auto.batch_decode(__snake_case )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def snake_case ( self : List[str] ):
lowerCamelCase :Optional[int] = self.get_feature_extractor()
lowerCamelCase :int = self.get_tokenizer()
lowerCamelCase :Dict = self.get_decoder()
lowerCamelCase :str = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def snake_case ( __snake_case : Optional[Any] , __snake_case : Union[str, Any] ):
lowerCamelCase :Union[str, Any] = [d[key] for d in offsets]
return retrieved_list
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Dict = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase :List[str] = self._get_dummy_logits()[0]
lowerCamelCase :Any = processor.decode(__snake_case , output_word_offsets=__snake_case )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(__snake_case , __snake_case ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def snake_case ( self : List[str] ):
lowerCamelCase :List[str] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCamelCase :Optional[int] = self._get_dummy_logits()
lowerCamelCase :List[str] = processor.batch_decode(__snake_case , output_word_offsets=__snake_case )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(__snake_case , __snake_case ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(__snake_case , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def snake_case ( self : Dict ):
import torch
lowerCamelCase :Union[str, Any] = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=__snake_case )
lowerCamelCase :Optional[Any] = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=16000 ) )
lowerCamelCase :Dict = iter(__snake_case )
lowerCamelCase :Tuple = next(__snake_case )
lowerCamelCase :str = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
lowerCamelCase :Dict = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
lowerCamelCase :Optional[Any] = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
lowerCamelCase :Dict = model(__snake_case ).logits.cpu().numpy()
lowerCamelCase :Dict = processor.decode(logits[0] , output_word_offsets=__snake_case )
lowerCamelCase :str = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
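        # Convert frame offsets to seconds: each logit frame covers
        # inputs_to_logits_ratio / sampling_rate seconds of input audio.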
lowerCamelCase :Tuple = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
lowerCamelCase :Dict = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(__snake_case , '''word''' ) ) , __snake_case )
self.assertEqual(''' '''.join(self.get_from_offsets(__snake_case , '''word''' ) ) , output.text )
# output times
lowerCamelCase :List[Any] = torch.tensor(self.get_from_offsets(__snake_case , '''start_time''' ) )
lowerCamelCase :Any = torch.tensor(self.get_from_offsets(__snake_case , '''end_time''' ) )
# fmt: off
        lowerCamelCase :Union[str, Any] = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
        lowerCamelCase :Optional[Any] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
        self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=0.01 ) )
        self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=0.01 ) )
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : str ):
lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :Optional[Any] = -1
lowerCamelCase :List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :str = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase :str = TextStreamer(__snake_case )
model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase :Optional[int] = cs.out[:-1]
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : Dict ):
lowerCamelCase :Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :int = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :List[Any] = -1
lowerCamelCase :Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :List[Any] = tokenizer.decode(greedy_ids[0] )
lowerCamelCase :List[str] = TextIteratorStreamer(__snake_case )
lowerCamelCase :List[str] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case )
thread.start()
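        # Generation runs in the background thread while the streamer is consumed on
        # the main thread; iteration stops once generation signals the end of the stream.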
lowerCamelCase :Any = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : str ):
lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :List[str] = -1
lowerCamelCase :Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Optional[Any] = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :List[str] = greedy_ids[:, input_ids.shape[1] :]
lowerCamelCase :Union[str, Any] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase :List[str] = TextStreamer(__snake_case , skip_prompt=__snake_case )
model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase :int = cs.out[:-1]
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : Optional[int] ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowerCamelCase :List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' )
lowerCamelCase :Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__snake_case )
lowerCamelCase :Optional[int] = -1
lowerCamelCase :Union[str, Any] = torch.ones((1, 5) , device=__snake_case ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowerCamelCase :Dict = TextStreamer(__snake_case , skip_special_tokens=__snake_case )
model.generate(__snake_case , max_new_tokens=1 , do_sample=__snake_case , streamer=__snake_case )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowerCamelCase :Tuple = cs.out[:-1] # Remove the final "\n"
lowerCamelCase :int = tokenizer(__snake_case , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def snake_case ( self : List[Any] ):
lowerCamelCase :List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :Optional[int] = -1
lowerCamelCase :Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
        lowerCamelCase :List[Any] = TextIteratorStreamer(__snake_case , timeout=0.001 )
lowerCamelCase :Dict = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__snake_case ):
lowerCamelCase :Dict = ''''''
for new_text in streamer:
streamer_text += new_text
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
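# Information Gain Filtration (IGF): a small secondary learner predicts how
# informative each candidate training context is for fine-tuning, and only contexts
# whose predicted information gain clears a threshold are back-propagated through
# the language model.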
def _lowerCamelCase ( a_ : Optional[int]=32 , a_ : Any=10 , a_ : Dict=1_00 , a_ : int=10_26 , a_ : List[str]=True , a_ : Dict="data/tokenized_stories_train_wikitext103.jbl" , a_ : int="igf_context_pairs.jbl" , ):
set_seed(3)
# generate train_data and objective_set
lowerCamelCase :Any = generate_datasets(
a_ , a_ , number=a_ , min_len=10_26 , trim=a_)
# keeps model same across runs
set_seed(4)
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
lowerCamelCase :Tuple = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''')
# load pretrained model
lowerCamelCase :List[str] = load_gpta('''gpt2''').to(a_)
print('''computing perplexity on objective set''')
lowerCamelCase :Tuple = compute_perplexity(a_ , a_ , a_).item()
print('''perplexity on objective set:''' , a_)
# collect igf pairs and save to file demo.jbl
collect_objective_set(a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_)
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def _lowerCamelCase ( a_ : Optional[Any] , a_ : List[Any]=15 , a_ : List[Any]=1_28 , a_ : Optional[Any]=1_00 , a_ : Tuple="igf_model.pt" , ):
set_seed(42)
# Load pre-trained model
lowerCamelCase :Tuple = GPTaLMHeadModel.from_pretrained('''gpt2''')
# Initialize secondary learner to use embedding weights of model
lowerCamelCase :List[Any] = SecondaryLearner(a_)
# Train secondary learner
lowerCamelCase :Tuple = train_secondary_learner(
a_ , a_ , max_epochs=a_ , batch_size=a_ , eval_freq=1_00 , igf_model_path=a_ , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def _lowerCamelCase ( a_ : Union[str, Any] , a_ : int , a_ : Optional[Any] , a_ : List[Any]=32 , a_ : Tuple=10_00 , a_ : List[Any]=16 , a_ : List[str]=1.0 , a_ : Tuple=recopy_gpta , a_ : Tuple=None , a_ : List[Any]=10 , a_ : str="gpt2_finetuned.pt" , ):
lowerCamelCase :int = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''')
lowerCamelCase :Any = RandomSampler(a_)
lowerCamelCase :Any = DataLoader(a_ , sampler=a_)
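    # epochs needed so that iterating the dataloader covers at least max_steps batches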
lowerCamelCase :Optional[Any] = max_steps // (len(a_)) + 1
lowerCamelCase :Optional[Any] = 0
lowerCamelCase :str = torch.zeros((1, context_len) , dtype=torch.long , device=a_)
lowerCamelCase :List[str] = recopy_model(a_ , a_ , a_)
model.train()
if secondary_learner is not None:
secondary_learner.to(a_)
secondary_learner.eval()
lowerCamelCase :Optional[int] = []
lowerCamelCase :Any = 0
lowerCamelCase :Optional[Any] = []
lowerCamelCase :Union[str, Any] = []
# Compute the performance of the transformer model at the beginning
lowerCamelCase :Optional[Any] = compute_perplexity(a_ , a_ , a_)
test_perps.append(a_)
print('''Test perplexity, step''' , a_ , ''':''' , a_)
for epoch in range(int(a_)):
for step, example in enumerate(a_):
torch.cuda.empty_cache()
lowerCamelCase :List[str] = random.randint(0 , example.size(2) - context_len - 1)
lowerCamelCase :Dict = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
lowerCamelCase :Optional[Any] = model(a_ , labels=a_)
lowerCamelCase :Optional[Any] = True
if secondary_learner is not None:
lowerCamelCase :Optional[Any] = secondary_learner.forward(
torch.tensor(a_ , dtype=torch.long , device=a_).unsqueeze(0))[0].item()
observed_qs.append(float(a_))
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
lowerCamelCase :str = -1
if predicted_q < threshold:
lowerCamelCase :Tuple = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu()))
lowerCamelCase :int = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
lowerCamelCase :Optional[int] = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0)
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
lowerCamelCase :int = compute_perplexity(a_ , a_ , a_)
test_perps.append(a_)
print('''Test perplexity, step''' , a_ , ''':''' , a_)
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , a_)
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def _lowerCamelCase ( ):
lowerCamelCase :Optional[Any] = argparse.ArgumentParser(description='''Fine-tune a transformer model with IGF on a language modeling task''')
# Required parameters
parser.add_argument(
'''--data_dir''' , default=a_ , type=a_ , required=a_ , help='''The input data dir. Should contain data files for WikiText.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=a_ , type=a_ , required=a_ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--data_file''' , type=a_ , default=a_ , help=(
'''A jbl file containing tokenized data which can be split as objective dataset, '''
'''train_dataset and test_dataset.'''
) , )
parser.add_argument(
'''--igf_data_file''' , type=a_ , default=a_ , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , )
parser.add_argument(
'''--output_dir''' , default=a_ , type=a_ , required=a_ , help='''The output directory where the final fine-tuned model is stored.''' , )
parser.add_argument(
'''--tokenizer_name''' , default=a_ , type=a_ , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument('''--seed''' , type=a_ , default=a_ , help='''A seed for reproducible training.''')
parser.add_argument(
'''--context_len''' , default=32 , type=a_ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--size_objective_set''' , default=1_00 , type=a_ , help='''number of articles that are long enough to be used as our objective set''' , )
parser.add_argument(
'''--eval_freq''' , default=1_00 , type=a_ , help='''secondary model evaluation is triggered at eval_freq''')
parser.add_argument('''--max_steps''' , default=10_00 , type=a_ , help='''To calculate training epochs''')
parser.add_argument(
'''--secondary_learner_batch_size''' , default=1_28 , type=a_ , help='''batch size of training data for secondary learner''' , )
parser.add_argument(
        '''--batch_size''' , default=16 , type=a_ , help='''batch size of training data of language model (gpt2)''')
parser.add_argument(
'''--eval_interval''' , default=10 , type=a_ , help=(
        '''decay the selectivity of our secondary learner filter from '''
'''1 standard deviation above average to 1 below average after 10 batches'''
) , )
parser.add_argument(
'''--number''' , default=1_00 , type=a_ , help='''The number of examples split to be used as objective_set/test_data''')
parser.add_argument(
'''--min_len''' , default=10_26 , type=a_ , help='''The minimum length of the article to be used as objective set''')
parser.add_argument(
'''--secondary_learner_max_epochs''' , default=15 , type=a_ , help='''number of epochs to train secondary learner''')
parser.add_argument('''--trim''' , default=a_ , type=a_ , help='''truncate the example if it exceeds context length''')
parser.add_argument(
'''--threshold''' , default=1.0 , type=a_ , help=(
'''The threshold value used by secondary learner to filter the train_data and allow only'''
''' informative data as input to the model'''
) , )
parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=a_ , help='''finetuned_model_name''')
parser.add_argument(
'''--recopy_model''' , default=a_ , type=a_ , help='''Reset the model to the original pretrained GPT-2 weights after each iteration''' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=1_00 , min_len=10_26 , trim=a_ , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , )
# Load train data for secondary learner
lowerCamelCase :str = joblib.load('''data/IGF_values.jbl''')
# Train secondary learner
lowerCamelCase :Tuple = training_secondary_learner(
a_ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=1_28 , eval_freq=1_00 , igf_model_path='''igf_model.pt''' , )
# load pretrained gpt2 model
lowerCamelCase :Union[str, Any] = GPTaLMHeadModel.from_pretrained('''gpt2''')
set_seed(42)
# Generate train and test data to train and evaluate gpt2 model
lowerCamelCase :int = generate_datasets(
context_len=32 , file='''data/tokenized_stories_train_wikitext103.jbl''' , number=1_00 , min_len=10_26 , trim=a_)
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
a_ , a_ , a_ , context_len=32 , max_steps=10_00 , batch_size=16 , threshold=1.0 , recopy_model=a_ , secondary_learner=a_ , eval_interval=10 , finetuned_model_name='''gpt2_finetuned.pt''' , )
if __name__ == "__main__":
main()
from maths.prime_factors import prime_factors
def _lowerCamelCase ( a_ : int):
if not isinstance(a_ , a_):
        lowerCamelCase :Tuple = F"Input value of [number={a_}] must be an integer"
raise TypeError(a_)
if number < 1:
raise ValueError('''Input must be a positive integer''')
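    # sign from the parity of the prime-factor count: -1 if odd, 1 if even
    # (a Liouville-style sign, assuming prime_factors returns factors with multiplicity)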
return -1 if len(prime_factors(a_)) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = KandinskyImgaImgPipeline
_UpperCAmelCase = ['prompt', 'image_embeds', 'negative_image_embeds', 'image']
_UpperCAmelCase = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
]
_UpperCAmelCase = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
_UpperCAmelCase = False
@property
def snake_case ( self : Optional[int] ):
return 32
@property
def snake_case ( self : Union[str, Any] ):
return 32
@property
def snake_case ( self : Union[str, Any] ):
return self.time_input_dim
@property
def snake_case ( self : List[str] ):
return self.time_input_dim * 4
@property
def snake_case ( self : List[str] ):
return 100
@property
def snake_case ( self : List[str] ):
lowerCamelCase :int = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def snake_case ( self : int ):
torch.manual_seed(0 )
lowerCamelCase :Union[str, Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
lowerCamelCase :Tuple = MultilingualCLIP(__snake_case )
lowerCamelCase :Dict = text_encoder.eval()
return text_encoder
@property
def snake_case ( self : Optional[Any] ):
torch.manual_seed(0 )
lowerCamelCase :Tuple = {
'''in_channels''': 4,
            # Out channels is double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
lowerCamelCase :Dict = UNetaDConditionModel(**__snake_case )
return model
@property
def snake_case ( self : Dict ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def snake_case ( self : int ):
torch.manual_seed(0 )
lowerCamelCase :Union[str, Any] = VQModel(**self.dummy_movq_kwargs )
return model
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Union[str, Any] = self.dummy_text_encoder
lowerCamelCase :Any = self.dummy_tokenizer
lowerCamelCase :Optional[int] = self.dummy_unet
lowerCamelCase :Union[str, Any] = self.dummy_movq
lowerCamelCase :int = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
            '''beta_start''': 0.00085,
            '''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
lowerCamelCase :List[str] = DDIMScheduler(**__snake_case )
lowerCamelCase :Tuple = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def snake_case ( self : Tuple , __snake_case : str , __snake_case : Tuple=0 ):
lowerCamelCase :Optional[int] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__snake_case ) ).to(__snake_case )
lowerCamelCase :str = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__snake_case )
# create init_image
lowerCamelCase :List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case )
lowerCamelCase :Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase :Optional[int] = Image.fromarray(np.uinta(__snake_case ) ).convert('''RGB''' ).resize((256, 256) )
if str(__snake_case ).startswith('''mps''' ):
lowerCamelCase :Any = torch.manual_seed(__snake_case )
else:
lowerCamelCase :Dict = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
lowerCamelCase :Union[str, Any] = {
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def snake_case ( self : Tuple ):
lowerCamelCase :List[Any] = '''cpu'''
lowerCamelCase :List[str] = self.get_dummy_components()
lowerCamelCase :Any = self.pipeline_class(**__snake_case )
lowerCamelCase :Union[str, Any] = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
lowerCamelCase :Dict = pipe(**self.get_dummy_inputs(__snake_case ) )
lowerCamelCase :List[str] = output.images
lowerCamelCase :Any = pipe(
**self.get_dummy_inputs(__snake_case ) , return_dict=__snake_case , )[0]
lowerCamelCase :Tuple = image[0, -3:, -3:, -1]
lowerCamelCase :Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase :Dict = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : Any ):
lowerCamelCase :Union[str, Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
lowerCamelCase :int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
lowerCamelCase :int = '''A red cartoon frog, 4k'''
lowerCamelCase :Dict = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(__snake_case )
lowerCamelCase :Optional[Any] = KandinskyImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa )
lowerCamelCase :Optional[Any] = pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
lowerCamelCase :int = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowerCamelCase :int = pipe_prior(
__snake_case , generator=__snake_case , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
lowerCamelCase :int = pipeline(
__snake_case , image=__snake_case , image_embeds=__snake_case , negative_image_embeds=__snake_case , generator=__snake_case , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
lowerCamelCase :Dict = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
def _lowerCamelCase ( a_ : str , a_ : str=False):
lowerCamelCase :Optional[int] = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token'''))
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings'''))
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''))
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''))
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias'''))
for stage_idx in range(len(config.backbone_config.depths)):
for layer_idx in range(config.backbone_config.depths[stage_idx]):
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias"))
# transformer encoder
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight"))
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias"))
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight"))
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias"))
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight"))
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias"))
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
])
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCamelCase :List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
])
# fmt: on
return rename_keys
def _lowerCamelCase ( a_ : Any , a_ : Any , a_ : int=False):
for i in range(config.num_hidden_layers):
if base_model:
lowerCamelCase :Union[str, Any] = ''''''
else:
lowerCamelCase :Optional[int] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase :Optional[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.weight")
lowerCamelCase :Any = state_dict.pop(F"blocks.{i}.attn.qkv.bias")
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase :Any = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase :Tuple = in_proj_bias[: config.hidden_size]
lowerCamelCase :int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase :Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase :Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase :List[Any] = in_proj_bias[-config.hidden_size :]
def _lowerCamelCase ( a_ : int):
lowerCamelCase :Any = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(a_ , a_)
def _lowerCamelCase ( a_ : int , a_ : Any , a_ : Tuple):
lowerCamelCase :Optional[Any] = dct.pop(a_)
lowerCamelCase :str = val
def _lowerCamelCase ( ):
lowerCamelCase :Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase :Tuple = Image.open(requests.get(a_ , stream=a_).raw)
return im
@torch.no_grad()
def _lowerCamelCase ( a_ : Optional[Any] , a_ : Optional[Any] , a_ : Optional[Any]=False):
lowerCamelCase :Optional[int] = BitConfig(
global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=a_ , )
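    # hybrid ViT: a BiT (ResNet) backbone acts as the patch embedder, and its stage-3
    # feature map is fed into the transformer encoder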
lowerCamelCase :Optional[int] = ViTHybridConfig(backbone_config=a_ , image_size=3_84 , num_labels=10_00)
lowerCamelCase :List[Any] = False
# load original model from timm
lowerCamelCase :List[str] = timm.create_model(a_ , pretrained=a_)
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCamelCase :List[str] = timm_model.state_dict()
if base_model:
remove_classification_head_(a_)
lowerCamelCase :Tuple = create_rename_keys(a_ , a_)
for src, dest in rename_keys:
rename_key(a_ , a_ , a_)
read_in_q_k_v(a_ , a_ , a_)
lowerCamelCase :List[str] = '''huggingface/label-files'''
lowerCamelCase :Any = '''imagenet-1k-id2label.json'''
lowerCamelCase :List[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''') , '''r'''))
lowerCamelCase :Optional[Any] = {int(a_): v for k, v in idalabel.items()}
lowerCamelCase :Optional[int] = idalabel
lowerCamelCase :Union[str, Any] = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowerCamelCase :Optional[Any] = ViTHybridModel(a_).eval()
else:
lowerCamelCase :Dict = ViTHybridForImageClassification(a_).eval()
model.load_state_dict(a_)
# create image processor
lowerCamelCase :Dict = create_transform(**resolve_data_config({} , model=a_))
lowerCamelCase :str = transform.transforms
lowerCamelCase :int = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
lowerCamelCase :Any = ViTHybridImageProcessor(
do_resize=a_ , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=a_ , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=a_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
lowerCamelCase :Dict = prepare_img()
lowerCamelCase :str = transform(a_).unsqueeze(0)
lowerCamelCase :str = processor(a_ , return_tensors='''pt''').pixel_values
# verify pixel values
assert torch.allclose(a_ , a_)
# verify logits
with torch.no_grad():
lowerCamelCase :Optional[int] = model(a_)
lowerCamelCase :Union[str, Any] = outputs.logits
print('''Predicted class:''' , logits.argmax(-1).item())
if base_model:
lowerCamelCase :Union[str, Any] = timm_model.forward_features(a_)
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(a_ , outputs.pooler_output , atol=1e-3)
else:
lowerCamelCase :List[str] = timm_model(a_)
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(a_ , outputs.logits , atol=1e-3)
print('''Looks ok!''')
if pytorch_dump_folder_path is not None:
Path(a_).mkdir(exist_ok=a_)
print(F"Saving model {vit_name} to {pytorch_dump_folder_path}")
model.save_pretrained(a_)
print(F"Saving processor to {pytorch_dump_folder_path}")
processor.save_pretrained(a_)
if push_to_hub:
print(F"Pushing model and processor to the hub {vit_name}")
model.push_to_hub(F"ybelkada/{vit_name}")
processor.push_to_hub(F"ybelkada/{vit_name}")
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
A__ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = StableUnCLIPPipeline
_UpperCAmelCase = TEXT_TO_IMAGE_PARAMS
_UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_UpperCAmelCase = False
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :List[str] = 32
lowerCamelCase :List[Any] = embedder_hidden_size
# prior components
torch.manual_seed(0 )
lowerCamelCase :Any = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
lowerCamelCase :Union[str, Any] = CLIPTextModelWithProjection(
CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=__snake_case , projection_dim=__snake_case , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowerCamelCase :Optional[int] = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=__snake_case , num_layers=1 , )
torch.manual_seed(0 )
lowerCamelCase :List[Any] = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=__snake_case , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
lowerCamelCase :Dict = StableUnCLIPImageNormalizer(embedding_dim=__snake_case )
lowerCamelCase :Union[str, Any] = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
lowerCamelCase :Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
lowerCamelCase :Any = CLIPTextModel(
CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=__snake_case , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowerCamelCase :List[str] = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__snake_case , layers_per_block=1 , upcast_attention=__snake_case , use_linear_projection=__snake_case , )
torch.manual_seed(0 )
lowerCamelCase :List[str] = DDIMScheduler(
            beta_schedule='''scaled_linear''' , beta_start=0.00085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=__snake_case , steps_offset=1 , )
torch.manual_seed(0 )
lowerCamelCase :Union[str, Any] = AutoencoderKL()
lowerCamelCase :int = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def snake_case ( self : int , __snake_case : Tuple , __snake_case : str=0 ):
if str(__snake_case ).startswith('''mps''' ):
lowerCamelCase :int = torch.manual_seed(__snake_case )
else:
lowerCamelCase :str = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
lowerCamelCase :int = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def snake_case ( self : Optional[Any] ):
lowerCamelCase :List[Any] = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=__snake_case )
def snake_case ( self : Dict ):
lowerCamelCase :Dict = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=__snake_case )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : Any ):
lowerCamelCase :List[str] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
lowerCamelCase :List[str] = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCamelCase :Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowerCamelCase :Optional[Any] = pipe('''anime turle''' , generator=__snake_case , output_type='''np''' )
lowerCamelCase :Any = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
def snake_case ( self : Optional[int] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase :str = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
lowerCamelCase :Union[str, Any] = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCamelCase :Optional[int] = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
lowerCamelCase :str = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
def _lowerCamelCase ( a_ : int = 4_000_000):
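    # Project Euler problem 2: sum the even-valued Fibonacci terms not exceeding a_.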
lowerCamelCase :Dict = [0, 1]
lowerCamelCase :Optional[Any] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1])
if fib[i + 2] > n:
break
i += 1
lowerCamelCase :Dict = 0
for j in range(len(a_) - 1):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(F'{solution() = }')
def _lowerCamelCase ( a_ : str):
assert column_title.isupper()
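    # Base-26 with digits 1..26: "A" -> 1, "AB" -> 1*26 + 2 = 28, "ZZ" -> 702.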
lowerCamelCase :List[Any] = 0
lowerCamelCase :int = len(a_) - 1
lowerCamelCase :Dict = 0
while index >= 0:
lowerCamelCase :List[Any] = (ord(column_title[index]) - 64) * pow(26 , a_)
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
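# Lazy import pattern: heavy, torch-dependent submodules are only imported on first
# attribute access (or eagerly under TYPE_CHECKING for static type checkers).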
A__ = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
            NllbMoeTop2Router,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import _LazyModule
A__ = {"""tokenization_bertweet""": ["""BertweetTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import numpy
class _lowerCAmelCase :
def __init__( self : Dict , __snake_case : numpy.ndarray , __snake_case : numpy.ndarray ):
lowerCamelCase :Dict = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
lowerCamelCase :Dict = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
lowerCamelCase :Dict = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
lowerCamelCase :Any = numpy.random.rand(3 , 1 )
# Real output values provided.
lowerCamelCase :Union[str, Any] = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
lowerCamelCase :List[str] = numpy.zeros(output_array.shape )
def snake_case ( self : Optional[int] ):
lowerCamelCase :Any = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
lowerCamelCase :Any = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
lowerCamelCase :Dict = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def snake_case ( self : Any ):
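        # Plain gradient descent on the squared error: each weight update is the
        # chain-rule product of the propagated error signal and the sigmoid
        # derivative of that layer's activations.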
lowerCamelCase :Union[str, Any] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
lowerCamelCase :Dict = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
lowerCamelCase :int = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def snake_case ( self : Dict , __snake_case : numpy.ndarray , __snake_case : int , __snake_case : bool ):
for iteration in range(1 , iterations + 1 ):
lowerCamelCase :Union[str, Any] = self.feedforward()
self.back_propagation()
if give_loss:
lowerCamelCase :Tuple = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F"Iteration {iteration} Loss: {loss}" )
def snake_case ( self : Optional[int] , __snake_case : numpy.ndarray ):
        lowerCamelCase :Union[str, Any] = sigmoid(
            numpy.dot(__snake_case , self.input_layer_and_first_hidden_layer_weights ) )
lowerCamelCase :Optional[Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
lowerCamelCase :Optional[int] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def _lowerCamelCase ( a_ : numpy.ndarray):
return 1 / (1 + numpy.exp(-value))
def _lowerCamelCase ( a_ : numpy.ndarray):
return (value) * (1 - (value))
def _lowerCamelCase ( ):
lowerCamelCase :Optional[Any] = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
lowerCamelCase :int = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa)
# Calling neural network class.
lowerCamelCase :List[Any] = TwoHiddenLayerNeuralNetwork(
input_array=a_ , output_array=a_)
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=a_ , iterations=10 , give_loss=a_)
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa))
if __name__ == "__main__":
example()
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def _lowerCamelCase ( a_ : List[Any] , a_ : List[str] , a_ : Optional[Any]):
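    # over-fetch: request ~50% more candidates than needed, presumably to leave
    # headroom for failed downloads and filtered results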
lowerCamelCase :List[Any] = 1.5
lowerCamelCase :Any = int(factor * num_class_images)
lowerCamelCase :List[str] = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=a_ , aesthetic_weight=0.1)
os.makedirs(F"{class_data_dir}/images" , exist_ok=a_)
if len(list(Path(F"{class_data_dir}/images").iterdir())) >= num_class_images:
return
while True:
lowerCamelCase :Dict = client.query(text=a_)
if len(a_) >= factor * num_class_images or num_images > 1e4:
break
else:
lowerCamelCase :Tuple = int(factor * num_images)
lowerCamelCase :Optional[int] = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=a_ , aesthetic_weight=0.1 , )
lowerCamelCase :List[str] = 0
lowerCamelCase :Tuple = 0
lowerCamelCase :Union[str, Any] = tqdm(desc='''downloading real regularization images''' , total=a_)
with open(F"{class_data_dir}/caption.txt" , '''w''') as fa, open(F"{class_data_dir}/urls.txt" , '''w''') as fa, open(
F"{class_data_dir}/images.txt" , '''w''') as fa:
while total < num_class_images:
lowerCamelCase :List[Any] = class_images[count]
count += 1
try:
lowerCamelCase :Dict = requests.get(images['''url'''])
if img.status_code == 2_00:
lowerCamelCase :Any = Image.open(BytesIO(img.content))
with open(F"{class_data_dir}/images/{total}.jpg" , '''wb''') as f:
f.write(img.content)
                    fa.write(images['''caption'''] + '''\n''')
                    fb.write(images['''url'''] + '''\n''')
                    fc.write(F"{class_data_dir}/images/{total}.jpg" + '''\n''')
total += 1
pbar.update(1)
else:
continue
except Exception:
continue
return
def _lowerCamelCase ( ):
lowerCamelCase :List[Any] = argparse.ArgumentParser('''''' , add_help=a_)
parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=a_ , type=a_)
parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=a_ , type=a_)
parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=2_00 , type=a_)
return parser.parse_args()
if __name__ == "__main__":
A__ = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 712
|
def _lowerCamelCase ( a_ : str , a_ : str):
lowerCamelCase :List[str] = len(a_)
lowerCamelCase :List[str] = len(a_)
lowerCamelCase :int = [[False for _ in range(m + 1)] for _ in range(n + 1)]
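    # dp[i][j]: can the first i characters of a be turned into the first j characters of b
    # by upper-casing some of a's lowercase letters and deleting the remaining lowercase ones?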
lowerCamelCase :Optional[Any] = True
for i in range(a_):
for j in range(m + 1):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
lowerCamelCase :Any = True
if a[i].islower():
lowerCamelCase :List[str] = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49
| 0
|
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
A__ = {"""LayoutLMv2Config""", """LayoutLMv3Config"""}
@is_pipeline_test
class _lowerCAmelCase ( unittest.TestCase ):
_UpperCAmelCase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_UpperCAmelCase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
_UpperCAmelCase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
_UpperCAmelCase = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def snake_case ( self : Dict , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : Dict ):
lowerCamelCase :Any = ZeroShotClassificationPipeline(
            model=__snake_case , tokenizer=__snake_case , candidate_labels=['''politics''', '''health'''] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def snake_case ( self : int , __snake_case : str , __snake_case : List[Any] ):
lowerCamelCase :List[str] = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''' )
self.assertEqual(__snake_case , {'''sequence''': ANY(__snake_case ), '''labels''': [ANY(__snake_case )], '''scores''': [ANY(__snake_case )]} )
# No kwarg
lowerCamelCase :Any = classifier('''Who are you voting for in 2020?''' , ['''politics'''] )
self.assertEqual(__snake_case , {'''sequence''': ANY(__snake_case ), '''labels''': [ANY(__snake_case )], '''scores''': [ANY(__snake_case )]} )
lowerCamelCase :Any = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics'''] )
self.assertEqual(__snake_case , {'''sequence''': ANY(__snake_case ), '''labels''': [ANY(__snake_case )], '''scores''': [ANY(__snake_case )]} )
lowerCamelCase :int = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''' )
self.assertEqual(
__snake_case , {'''sequence''': ANY(__snake_case ), '''labels''': [ANY(__snake_case ), ANY(__snake_case )], '''scores''': [ANY(__snake_case ), ANY(__snake_case )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
lowerCamelCase :Dict = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''] )
self.assertEqual(
__snake_case , {'''sequence''': ANY(__snake_case ), '''labels''': [ANY(__snake_case ), ANY(__snake_case )], '''scores''': [ANY(__snake_case ), ANY(__snake_case )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
lowerCamelCase :Optional[Any] = classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''' )
self.assertEqual(__snake_case , {'''sequence''': ANY(__snake_case ), '''labels''': [ANY(__snake_case )], '''scores''': [ANY(__snake_case )]} )
# https://github.com/huggingface/transformers/issues/13846
lowerCamelCase :List[str] = classifier(['''I am happy'''] , ['''positive''', '''negative'''] )
self.assertEqual(
__snake_case , [
{'''sequence''': ANY(__snake_case ), '''labels''': [ANY(__snake_case ), ANY(__snake_case )], '''scores''': [ANY(__snake_case ), ANY(__snake_case )]}
for i in range(1 )
] , )
lowerCamelCase :str = classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''] )
self.assertEqual(
__snake_case , [
{'''sequence''': ANY(__snake_case ), '''labels''': [ANY(__snake_case ), ANY(__snake_case )], '''scores''': [ANY(__snake_case ), ANY(__snake_case )]}
for i in range(2 )
] , )
with self.assertRaises(__snake_case ):
classifier('''''' , candidate_labels='''politics''' )
with self.assertRaises(__snake_case ):
classifier(__snake_case , candidate_labels='''politics''' )
with self.assertRaises(__snake_case ):
classifier('''Who are you voting for in 2020?''' , candidate_labels='''''' )
with self.assertRaises(__snake_case ):
classifier('''Who are you voting for in 2020?''' , candidate_labels=__snake_case )
with self.assertRaises(__snake_case ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , )
with self.assertRaises(__snake_case ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=__snake_case , )
self.run_entailment_id(__snake_case )
def snake_case ( self : Dict , __snake_case : Pipeline ):
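        # Check that the pipeline resolves entailment_id correctly for several label2id naming schemes.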
lowerCamelCase :int = zero_shot_classifier.model.config
lowerCamelCase :Optional[int] = config.labelaid
lowerCamelCase :Optional[Any] = zero_shot_classifier.entailment_id
lowerCamelCase :List[Any] = {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
lowerCamelCase :Any = {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
lowerCamelCase :Tuple = {'''ENTAIL''': 0, '''NON-ENTAIL''': 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
lowerCamelCase :List[Any] = {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
lowerCamelCase :Any = original_labelaid
self.assertEqual(__snake_case , zero_shot_classifier.entailment_id )
@require_torch
def snake_case ( self : str ):
lowerCamelCase :str = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'''Who are you voting for in 2020?''' * 100 , candidate_labels=['''politics''', '''public health''', '''science'''] )
@require_torch
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Optional[int] = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
lowerCamelCase :Optional[int] = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(__snake_case ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@require_tf
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Any = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''tf''' , )
lowerCamelCase :Union[str, Any] = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(__snake_case ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@slow
@require_torch
def snake_case ( self : Dict ):
lowerCamelCase :Union[str, Any] = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''pt''' )
lowerCamelCase :Optional[int] = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(__snake_case ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
lowerCamelCase :List[str] = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=__snake_case , )
self.assertEqual(
nested_simplify(__snake_case ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
@slow
@require_tf
def snake_case ( self : Tuple ):
lowerCamelCase :Optional[Any] = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''tf''' )
lowerCamelCase :Optional[Any] = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(__snake_case ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
lowerCamelCase :int = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=__snake_case , )
self.assertEqual(
nested_simplify(__snake_case ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
| 713
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCAmelCase :
def __init__( self : Any , __snake_case : Optional[int] , __snake_case : int=13 , __snake_case : str=[30, 30] , __snake_case : Tuple=2 , __snake_case : Optional[Any]=3 , __snake_case : int=True , __snake_case : Tuple=True , __snake_case : List[Any]=32 , __snake_case : int=5 , __snake_case : Optional[Any]=4 , __snake_case : Union[str, Any]=37 , __snake_case : str="gelu" , __snake_case : Tuple=0.1 , __snake_case : List[Any]=0.1 , __snake_case : Union[str, Any]=10 , __snake_case : str=0.0_2 , __snake_case : Union[str, Any]=3 , __snake_case : Union[str, Any]=None , __snake_case : List[str]=8 , __snake_case : Any=10 , ):
lowerCamelCase :Optional[Any] = parent
lowerCamelCase :List[Any] = batch_size
lowerCamelCase :Any = image_size
lowerCamelCase :Union[str, Any] = patch_size
lowerCamelCase :Any = num_channels
lowerCamelCase :List[Any] = is_training
lowerCamelCase :Optional[Any] = use_labels
lowerCamelCase :Any = hidden_size
lowerCamelCase :List[Any] = num_hidden_layers
lowerCamelCase :List[str] = num_attention_heads
lowerCamelCase :Tuple = intermediate_size
lowerCamelCase :List[str] = hidden_act
lowerCamelCase :List[str] = hidden_dropout_prob
lowerCamelCase :Any = attention_probs_dropout_prob
lowerCamelCase :List[Any] = type_sequence_label_size
lowerCamelCase :Optional[int] = initializer_range
lowerCamelCase :List[Any] = num_labels
lowerCamelCase :Any = scope
lowerCamelCase :Union[str, Any] = n_targets
lowerCamelCase :Optional[Any] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
lowerCamelCase :Tuple = (image_size[1] // patch_size) * (image_size[0] // patch_size)
lowerCamelCase :str = num_patches + 1 + self.num_detection_tokens
def snake_case ( self : List[str] ):
lowerCamelCase :str = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowerCamelCase :List[str] = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowerCamelCase :Optional[int] = []
for i in range(self.batch_size ):
lowerCamelCase :List[str] = {}
lowerCamelCase :Tuple = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=__snake_case )
lowerCamelCase :List[str] = torch.rand(self.n_targets , 4 , device=__snake_case )
labels.append(__snake_case )
lowerCamelCase :str = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Union[str, Any] ):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__snake_case , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def snake_case ( self : Tuple , __snake_case : Tuple , __snake_case : Tuple , __snake_case : Any ):
lowerCamelCase :Optional[Any] = YolosModel(config=__snake_case )
model.to(__snake_case )
model.eval()
lowerCamelCase :Union[str, Any] = model(__snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def snake_case ( self : Dict , __snake_case : str , __snake_case : Optional[int] , __snake_case : Optional[Any] ):
lowerCamelCase :int = YolosForObjectDetection(__snake_case )
model.to(__snake_case )
model.eval()
lowerCamelCase :str = model(pixel_values=__snake_case )
lowerCamelCase :Any = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
lowerCamelCase :int = model(pixel_values=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def snake_case ( self : int ):
lowerCamelCase :List[Any] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase :str = config_and_inputs
lowerCamelCase :Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
_UpperCAmelCase = (
{'feature-extraction': YolosModel, 'object-detection': YolosForObjectDetection} if is_torch_available() else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def snake_case ( self : Any , __snake_case : List[Any] , __snake_case : Tuple , __snake_case : Dict=False ):
lowerCamelCase :Optional[int] = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
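                # Object detection expects one labels dict per image (class indices plus normalized boxes).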
lowerCamelCase :Dict = []
for i in range(self.model_tester.batch_size ):
lowerCamelCase :Optional[Any] = {}
lowerCamelCase :List[Any] = torch.ones(
size=(self.model_tester.n_targets,) , device=__snake_case , dtype=torch.long )
lowerCamelCase :str = torch.ones(
self.model_tester.n_targets , 4 , device=__snake_case , dtype=torch.float )
labels.append(__snake_case )
lowerCamelCase :Union[str, Any] = labels
return inputs_dict
def snake_case ( self : Tuple ):
lowerCamelCase :Union[str, Any] = YolosModelTester(self )
lowerCamelCase :Dict = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case , hidden_size=37 )
def snake_case ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def snake_case ( self : Optional[Any] ):
# YOLOS does not use inputs_embeds
pass
def snake_case ( self : Tuple ):
lowerCamelCase , lowerCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :Optional[int] = model_class(__snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase :str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) )
def snake_case ( self : str ):
lowerCamelCase , lowerCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :str = model_class(__snake_case )
lowerCamelCase :Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase :Tuple = [*signature.parameters.keys()]
lowerCamelCase :Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __snake_case )
def snake_case ( self : int ):
lowerCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def snake_case ( self : str ):
lowerCamelCase , lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase :int = True
# in YOLOS, the seq_len is different
lowerCamelCase :str = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowerCamelCase :str = True
lowerCamelCase :Tuple = False
lowerCamelCase :Optional[int] = True
lowerCamelCase :int = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :str = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :str = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase :Optional[Any] = True
lowerCamelCase :str = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :Tuple = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Tuple = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowerCamelCase :Optional[int] = len(__snake_case )
# Check attention is always last and order is fine
lowerCamelCase :Union[str, Any] = True
lowerCamelCase :List[Any] = True
lowerCamelCase :Tuple = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :int = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Dict = 1
self.assertEqual(out_len + added_hidden_states , len(__snake_case ) )
lowerCamelCase :Dict = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def snake_case ( self : List[str] ):
def check_hidden_states_output(__snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Tuple ):
lowerCamelCase :Union[str, Any] = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :Any = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Optional[Any] = outputs.hidden_states
lowerCamelCase :Any = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__snake_case ) , __snake_case )
# YOLOS has a different seq_length
lowerCamelCase :List[str] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowerCamelCase , lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :Union[str, Any] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase :Any = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*__snake_case )
@slow
def snake_case ( self : Dict ):
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase :Tuple = YolosModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def _lowerCamelCase ( ):
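    # Standard COCO test image (two cats on a couch) used across the vision tests.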
lowerCamelCase :int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def snake_case ( self : Tuple ):
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def snake_case ( self : Dict ):
lowerCamelCase :Union[str, Any] = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(__snake_case )
lowerCamelCase :Optional[Any] = self.default_image_processor
lowerCamelCase :str = prepare_img()
lowerCamelCase :Dict = image_processor(images=__snake_case , return_tensors='''pt''' ).to(__snake_case )
# forward pass
with torch.no_grad():
lowerCamelCase :Optional[Any] = model(inputs.pixel_values )
# verify outputs
lowerCamelCase :int = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , __snake_case )
lowerCamelCase :Any = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=__snake_case , )
lowerCamelCase :Any = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __snake_case , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , __snake_case , atol=1e-4 ) )
# verify postprocessing
lowerCamelCase :List[str] = image_processor.post_process_object_detection(
__snake_case , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
lowerCamelCase :List[str] = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(__snake_case )
lowerCamelCase :str = [75, 75, 17, 63, 17]
lowerCamelCase :Tuple = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(__snake_case )
self.assertEqual(len(results['''scores'''] ) , 5 )
self.assertTrue(torch.allclose(results['''scores'''] , __snake_case , atol=1e-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist() , __snake_case )
self.assertTrue(torch.allclose(results['''boxes'''][0, :] , __snake_case ) )
| 49
| 0
|
def _lowerCAmelCase ( a_ : int = 4_00_00_00):
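    # Project Euler problem 2: sum the even-valued Fibonacci terms that do not exceed n.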
lowerCamelCase :Dict = [0, 1]
lowerCamelCase :Optional[Any] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1])
if fib[i + 2] > n:
break
i += 1
lowerCamelCase :Dict = 0
for j in range(len(a_) - 1):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(F'{solution() = }')
| 714
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Tuple ):
lowerCamelCase :List[Any] = inspect.getfile(accelerate.test_utils )
lowerCamelCase :Dict = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
lowerCamelCase :Any = test_metrics
@require_cpu
def snake_case ( self : Dict ):
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def snake_case ( self : int ):
debug_launcher(self.test_metrics.main )
@require_single_gpu
def snake_case ( self : Any ):
self.test_metrics.main()
@require_multi_gpu
def snake_case ( self : Optional[int] ):
print(F"Found {torch.cuda.device_count()} devices." )
lowerCamelCase :Optional[int] = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__snake_case , env=os.environ.copy() )
| 49
| 0
|
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self : Tuple , __snake_case : Distribution , __snake_case : Any=None , __snake_case : Any=None , __snake_case : List[Any]=0 ):
lowerCamelCase :Dict = 1.0 if scale is None else scale
lowerCamelCase :Any = 0.0 if loc is None else loc
super().__init__(__snake_case , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__snake_case )] )
@property
def snake_case ( self : Optional[Any] ):
return self.base_dist.mean * self.scale + self.loc
@property
def snake_case ( self : Tuple ):
return self.base_dist.variance * self.scale**2
@property
def snake_case ( self : Dict ):
return self.variance.sqrt()
class _lowerCAmelCase ( nn.Module ):
def __init__( self : str , __snake_case : int , __snake_case : Dict[str, int] , __snake_case : Callable[..., Tuple[torch.Tensor]] , **__snake_case : str ):
super().__init__(**__snake_case )
lowerCamelCase :List[Any] = args_dim
lowerCamelCase :Any = nn.ModuleList([nn.Linear(__snake_case , __snake_case ) for dim in args_dim.values()] )
lowerCamelCase :Optional[int] = domain_map
def snake_case ( self : int , __snake_case : torch.Tensor ):
lowerCamelCase :int = [proj(__snake_case ) for proj in self.proj]
return self.domain_map(*__snake_case )
class _lowerCAmelCase ( nn.Module ):
def __init__( self : str , __snake_case : Optional[int] ):
super().__init__()
lowerCamelCase :Union[str, Any] = function
def snake_case ( self : List[str] , __snake_case : List[str] , *__snake_case : str ):
return self.function(__snake_case , *__snake_case )
class _lowerCAmelCase :
_UpperCAmelCase = 4_2
_UpperCAmelCase = 4_2
_UpperCAmelCase = 4_2
def __init__( self : Tuple , __snake_case : int = 1 ):
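        # For dim > 1 the output is treated as dim independent copies of the distribution,
        # so each distribution argument needs dim times as many parameters.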
lowerCamelCase :Any = dim
lowerCamelCase :Tuple = {k: dim * self.args_dim[k] for k in self.args_dim}
def snake_case ( self : Optional[int] , __snake_case : List[str] ):
if self.dim == 1:
return self.distribution_class(*__snake_case )
else:
return Independent(self.distribution_class(*__snake_case ) , 1 )
def snake_case ( self : str , __snake_case : Tuple , __snake_case : Optional[torch.Tensor] = None , __snake_case : Optional[torch.Tensor] = None , ):
lowerCamelCase :Dict = self._base_distribution(__snake_case )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(__snake_case , loc=__snake_case , scale=__snake_case , event_dim=self.event_dim )
@property
def snake_case ( self : str ):
return () if self.dim == 1 else (self.dim,)
@property
def snake_case ( self : Any ):
return len(self.event_shape )
@property
def snake_case ( self : Optional[Any] ):
return 0.0
def snake_case ( self : Tuple , __snake_case : int ):
return ParameterProjection(
in_features=__snake_case , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def snake_case ( self : Optional[int] , *__snake_case : torch.Tensor ):
raise NotImplementedError()
@staticmethod
def snake_case ( __snake_case : torch.Tensor ):
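        # Squareplus maps any real x to a positive value, (x + sqrt(x**2 + 4)) / 2;
        # used to keep scale-like parameters positive without the saturation of softplus.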
return (x + torch.sqrt(torch.square(__snake_case ) + 4.0 )) / 2.0
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = {'df': 1, 'loc': 1, 'scale': 1}
_UpperCAmelCase = StudentT
@classmethod
def snake_case ( cls : Any , __snake_case : torch.Tensor , __snake_case : torch.Tensor , __snake_case : torch.Tensor ):
lowerCamelCase :List[str] = cls.squareplus(__snake_case ).clamp_min(torch.finfo(scale.dtype ).eps )
lowerCamelCase :Dict = 2.0 + cls.squareplus(__snake_case )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = {'loc': 1, 'scale': 1}
_UpperCAmelCase = Normal
@classmethod
def snake_case ( cls : Optional[int] , __snake_case : torch.Tensor , __snake_case : torch.Tensor ):
lowerCamelCase :Union[str, Any] = cls.squareplus(__snake_case ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = {'total_count': 1, 'logits': 1}
_UpperCAmelCase = NegativeBinomial
@classmethod
def snake_case ( cls : List[Any] , __snake_case : torch.Tensor , __snake_case : torch.Tensor ):
lowerCamelCase :Dict = cls.squareplus(__snake_case )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def snake_case ( self : Tuple , __snake_case : Optional[int] ):
lowerCamelCase :Tuple = distr_args
if self.dim == 1:
return self.distribution_class(total_count=__snake_case , logits=__snake_case )
else:
return Independent(self.distribution_class(total_count=__snake_case , logits=__snake_case ) , 1 )
def snake_case ( self : int , __snake_case : str , __snake_case : Optional[torch.Tensor] = None , __snake_case : Optional[torch.Tensor] = None ):
lowerCamelCase :int = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
| 715
|
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = ''
_UpperCAmelCase = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
_UpperCAmelCase = None # compression type in fsspec. ex: "gzip"
    _UpperCAmelCase = None # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__( self : str , __snake_case : str = "" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , **__snake_case : Dict ):
super().__init__(self , **__snake_case )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
lowerCamelCase :Optional[Any] = fsspec.open(
__snake_case , mode='''rb''' , protocol=__snake_case , compression=self.compression , client_kwargs={
'''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459
'''trust_env''': True, # Enable reading proxy env variables.
**(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
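        # The archive exposes a single member: the file name with its compression extension stripped.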
lowerCamelCase :List[str] = os.path.basename(self.file.path.split('''::''' )[0] )
lowerCamelCase :Dict = (
self.compressed_name[: self.compressed_name.rindex('''.''' )]
if '''.''' in self.compressed_name
else self.compressed_name
)
lowerCamelCase :List[str] = None
@classmethod
def snake_case ( cls : Any , __snake_case : Any ):
# compressed file paths are always relative to the archive root
return super()._strip_protocol(__snake_case ).lstrip('''/''' )
def snake_case ( self : Any ):
if self.dir_cache is None:
lowerCamelCase :Optional[Any] = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
lowerCamelCase :Optional[Any] = {f['''name''']: f}
def snake_case ( self : Union[str, Any] , __snake_case : str ):
return self.file.open().read()
def snake_case ( self : Optional[int] , __snake_case : str , __snake_case : str = "rb" , __snake_case : int=None , __snake_case : Optional[int]=True , __snake_case : str=None , **__snake_case : str , ):
lowerCamelCase :List[str] = self._strip_protocol(__snake_case )
if mode != "rb":
raise ValueError(F"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'" )
return self.file.open()
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'bz2'
_UpperCAmelCase = 'bz2'
_UpperCAmelCase = '.bz2'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'gzip'
_UpperCAmelCase = 'gzip'
_UpperCAmelCase = '.gz'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'lz4'
_UpperCAmelCase = 'lz4'
_UpperCAmelCase = '.lz4'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'xz'
_UpperCAmelCase = 'xz'
_UpperCAmelCase = '.xz'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'zstd'
_UpperCAmelCase = 'zstd'
_UpperCAmelCase = '.zst'
def __init__( self : str , __snake_case : str , __snake_case : str = "rb" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , __snake_case : int = DEFAULT_BLOCK_SIZE , **__snake_case : int , ):
super().__init__(
fo=__snake_case , mode=__snake_case , target_protocol=__snake_case , target_options=__snake_case , block_size=__snake_case , **__snake_case , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
lowerCamelCase :Tuple = self.file.__enter__
class _lowerCAmelCase :
def __init__( self : Dict , __snake_case : Tuple ):
lowerCamelCase :Optional[int] = file_
def __enter__( self : Optional[int] ):
self._file.__enter__()
return self
def __exit__( self : str , *__snake_case : Optional[Any] , **__snake_case : List[Any] ):
self._file.__exit__(*__snake_case , **__snake_case )
def __iter__( self : Optional[Any] ):
return iter(self._file )
def snake_case ( self : List[Any] ):
return next(self._file )
def __getattr__( self : Any , __snake_case : str ):
return getattr(self._file , __snake_case )
def fixed_enter(*__snake_case : Optional[int] , **__snake_case : str ):
return WrappedFile(_enter(*__snake_case , **__snake_case ) )
lowerCamelCase :Dict = fixed_enter
| 49
| 0
|
from __future__ import annotations
A__ = 10
def _lowerCamelCase ( a_ : list[int]):
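    # LSD radix sort: repeatedly bucket the integers by each base-10 digit, least significant digit first.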
lowerCamelCase :Any = 1
lowerCamelCase :List[str] = max(a_)
while placement <= max_digit:
# declare and initialize empty buckets
lowerCamelCase :list[list] = [[] for _ in range(a_)]
# split list_of_ints between the buckets
for i in list_of_ints:
lowerCamelCase :Tuple = int((i / placement) % RADIX)
buckets[tmp].append(a_)
# put each buckets' contents into list_of_ints
lowerCamelCase :Optional[int] = 0
for b in range(a_):
for i in buckets[b]:
lowerCamelCase :List[Any] = i
a += 1
# move to next
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = LEDTokenizer
_UpperCAmelCase = LEDTokenizerFast
_UpperCAmelCase = True
def snake_case ( self : Any ):
super().setUp()
lowerCamelCase :Optional[int] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowerCamelCase :Any = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
lowerCamelCase :List[str] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCamelCase :int = {'''unk_token''': '''<unk>'''}
lowerCamelCase :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__snake_case ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__snake_case ) )
def snake_case ( self : int , **__snake_case : int ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Dict , **__snake_case : Any ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Optional[Any] , __snake_case : Union[str, Any] ):
return "lower newer", "lower newer"
@cached_property
def snake_case ( self : Any ):
return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )
@cached_property
def snake_case ( self : int ):
return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )
@require_torch
def snake_case ( self : str ):
lowerCamelCase :Any = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
lowerCamelCase :List[Any] = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[Any] = tokenizer(__snake_case , max_length=len(__snake_case ) , padding=__snake_case , return_tensors='''pt''' )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
lowerCamelCase :List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(__snake_case , __snake_case )
@require_torch
def snake_case ( self : Tuple ):
lowerCamelCase :Dict = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Union[str, Any] = tokenizer(__snake_case , padding=__snake_case , return_tensors='''pt''' )
self.assertIn('''input_ids''' , __snake_case )
self.assertIn('''attention_mask''' , __snake_case )
self.assertNotIn('''labels''' , __snake_case )
self.assertNotIn('''decoder_attention_mask''' , __snake_case )
@require_torch
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Union[str, Any] = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :List[Any] = tokenizer(text_target=__snake_case , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def snake_case ( self : List[Any] ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[Any] = tokenizer(
['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=__snake_case , truncation=__snake_case , return_tensors='''pt''' )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
def snake_case ( self : Optional[int] ):
lowerCamelCase :Union[str, Any] = ['''A long paragraph for summarization.''']
lowerCamelCase :Any = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Union[str, Any] = tokenizer(__snake_case , return_tensors='''pt''' )
lowerCamelCase :Any = tokenizer(text_target=__snake_case , return_tensors='''pt''' )
lowerCamelCase :Optional[int] = inputs['''input_ids''']
lowerCamelCase :Any = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def snake_case ( self : Dict ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[int] = ['''Summary of the text.''', '''Another summary.''']
lowerCamelCase :List[Any] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
lowerCamelCase :Optional[int] = tokenizer(__snake_case , padding=__snake_case )
lowerCamelCase :Union[str, Any] = [[0] * len(__snake_case ) for x in encoded_output['''input_ids''']]
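            # tokenizer.pad extends the shorter global_attention_mask entries with -1 for padded positions.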
lowerCamelCase :str = tokenizer.pad(__snake_case )
self.assertSequenceEqual(outputs['''global_attention_mask'''] , __snake_case )
def snake_case ( self : Tuple ):
pass
def snake_case ( self : Optional[int] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase :Optional[Any] = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :Tuple = self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :int = '''A, <mask> AllenNLP sentence.'''
lowerCamelCase :str = tokenizer_r.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case )
lowerCamelCase :str = tokenizer_p.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case )
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
lowerCamelCase :Optional[int] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
lowerCamelCase :Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
__snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
| 49
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A__ = {
"""configuration_mvp""": ["""MVP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MvpConfig""", """MvpOnnxConfig"""],
"""tokenization_mvp""": ["""MvpTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""MvpTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""MVP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MvpForCausalLM""",
"""MvpForConditionalGeneration""",
"""MvpForQuestionAnswering""",
"""MvpForSequenceClassification""",
"""MvpModel""",
"""MvpPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 717
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A__ = {
"""configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
"""processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
"""tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""LayoutLMv2TokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""LayoutLMv2FeatureExtractor"""]
A__ = ["""LayoutLMv2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv2ForQuestionAnswering""",
"""LayoutLMv2ForSequenceClassification""",
"""LayoutLMv2ForTokenClassification""",
"""LayoutLMv2Layer""",
"""LayoutLMv2Model""",
"""LayoutLMv2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 49
| 0
|
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = DebertaTokenizer
_UpperCAmelCase = True
_UpperCAmelCase = DebertaTokenizerFast
def snake_case ( self : List[str] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase :Dict = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
lowerCamelCase :List[str] = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
lowerCamelCase :Any = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCamelCase :Dict = {'''unk_token''': '''[UNK]'''}
lowerCamelCase :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__snake_case ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__snake_case ) )
def snake_case ( self : str , **__snake_case : Dict ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Optional[Any] , __snake_case : int ):
lowerCamelCase :List[Any] = '''lower newer'''
lowerCamelCase :List[str] = '''lower newer'''
return input_text, output_text
def snake_case ( self : str ):
lowerCamelCase :Optional[int] = self.get_tokenizer()
lowerCamelCase :Union[str, Any] = '''lower newer'''
lowerCamelCase :str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
lowerCamelCase :Optional[int] = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
lowerCamelCase :List[str] = tokens + [tokenizer.unk_token]
lowerCamelCase :Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def snake_case ( self : Optional[int] ):
lowerCamelCase :List[str] = self.get_tokenizer()
lowerCamelCase :Optional[int] = tokenizer('''Hello''' , '''World''' )
lowerCamelCase :List[str] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['''token_type_ids'''] , __snake_case )
@slow
def snake_case ( self : str ):
lowerCamelCase :Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
lowerCamelCase :Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__snake_case )
lowerCamelCase :Tuple = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__snake_case )
lowerCamelCase :Union[str, Any] = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
lowerCamelCase :str = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
lowerCamelCase :Any = tokenizer.build_inputs_with_special_tokens(__snake_case )
lowerCamelCase :Dict = tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def snake_case ( self : str ):
lowerCamelCase :List[str] = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
lowerCamelCase :int = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
lowerCamelCase :Tuple = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
lowerCamelCase :List[Any] = tokenizer(__snake_case , padding=__snake_case )
lowerCamelCase :Union[str, Any] = [tokenizer.decode(__snake_case , skip_special_tokens=__snake_case ) for seq in encoding['''input_ids''']]
# fmt: off
lowerCamelCase :Any = {
'''input_ids''': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
lowerCamelCase :Optional[int] = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , __snake_case )
for expected, decoded in zip(__snake_case , __snake_case ):
self.assertEqual(__snake_case , __snake_case )
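# A minimal, hedged illustration of the byte-level BPE behaviour exercised above.
# `toy_bpe` is a simplified stand-in for the real GPT-2-style merge loop, not the
# DebertaTokenizer implementation; it only exists to make the vocab/merges fixture
# in setUp readable.
def toy_bpe(word, merges):
    # Start from single characters and greedily apply the highest-priority merge.
    ranks = {pair: rank for rank, pair in enumerate(merges)}
    symbols = list(word)
    while True:
        candidates = [
            (ranks.get(pair, float("inf")), index)
            for index, pair in enumerate(zip(symbols, symbols[1:]))
        ]
        best_rank, best_index = min(candidates, default=(float("inf"), -1))
        if best_rank == float("inf"):
            break
        symbols[best_index : best_index + 2] = [symbols[best_index] + symbols[best_index + 1]]
    return symbols

# With the single merge ("e", "r") from the fixture, "lower" splits exactly as the
# tokenizer test expects:
assert toy_bpe("lower", [("e", "r")]) == ["l", "o", "w", "er"]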
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _lowerCAmelCase :
@staticmethod
def snake_case ( *__snake_case : str , **__snake_case : str ):
pass
@is_pipeline_test
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@require_torch
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Optional[int] = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] )
        # The floating-point scores are so close that rounding error makes the
        # ranking unstable across Python and torch versions, so we only assert
        # membership in the set of allowed orderings.
self.assertIn(
nested_simplify(__snake_case ) , [
[{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}],
[{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}],
] , )
lowerCamelCase :Tuple = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
] , )
@require_tf
def snake_case ( self : Tuple ):
lowerCamelCase :Tuple = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}] , )
lowerCamelCase :int = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
] , )
@slow
@require_torch
def snake_case ( self : Any ):
lowerCamelCase :str = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
] , )
lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Union[str, Any] = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
lowerCamelCase :Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
] , )
lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
],
]
* 5 , )
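# Hedged usage sketch, kept separate from the tests above: the zero-shot
# image-classification pipeline scores one image against arbitrary candidate
# labels. The checkpoint and fixture path are taken from the slow tests; the
# exact scores depend on the model revision.
from transformers import pipeline
from PIL import Image

image_classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
predictions = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
# Each prediction is a dict such as {"score": 0.511, "label": "remote"}, sorted by score.
print(predictions)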
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A__ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""NllbTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""NllbTokenizerFast"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
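# Hedged note on the pattern above: _LazyModule defers the tokenizer imports until
# an attribute is first accessed, so importing the package stays cheap. A minimal
# usage sketch (the checkpoint name is an assumption for illustration):
#
#   from transformers import NllbTokenizer
#   tokenizer = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M")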
import operator as op
def _lowerCamelCase ( a_ : Tuple):
lowerCamelCase :int = []
lowerCamelCase :List[str] = lambda a_ , a_: int(x / y) # noqa: E731 integer division operation
lowerCamelCase :Optional[int] = {
'''^''': op.pow,
'''*''': op.mul,
'''/''': div,
'''+''': op.add,
'''-''': op.sub,
} # operators & their respective operation
# print table header
print('''Symbol'''.center(8) , '''Action'''.center(12) , '''Stack''' , sep=''' | ''')
print('''-''' * (30 + len(a_)))
for x in post_fix:
        if x.isdigit():  # if x is a digit
stack.append(a_) # append x to stack
# output in tabular format
print(x.rjust(8) , ('''push(''' + x + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
else:
lowerCamelCase :Optional[Any] = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8) , ('''pop(''' + b + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
lowerCamelCase :str = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8) , ('''pop(''' + a + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
stack.append(
str(opr[x](int(a_) , int(a_)))) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8) , ('''push(''' + a + x + b + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''' , )
return int(stack[0])
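# Worked example (matching the pop/push trace printed above): for the postfix
# input "2 3 4 * +" the stack evolves as
#   push(2) -> [2]; push(3) -> [2, 3]; push(4) -> [2, 3, 4];
#   "*" pops 4 and 3, pushes 3 * 4 = 12 -> [2, 12];
#   "+" pops 12 and 2, pushes 2 + 12 = 14 -> [14]
# so the function returns 14.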
if __name__ == "__main__":
A__ = input("""\n\nEnter a Postfix Equation (space separated) = """).split(""" """)
print("""\n\tResult = """, solve(Postfix))
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
'The RoBERTa Model transformer with early exiting (DeeRoBERTa). ' , __SCREAMING_SNAKE_CASE , )
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = RobertaConfig
_UpperCAmelCase = 'roberta'
def __init__( self : Dict , __snake_case : Optional[int] ):
super().__init__(__snake_case )
lowerCamelCase :Optional[int] = RobertaEmbeddings(__snake_case )
self.init_weights()
@add_start_docstrings(
'RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ' , __SCREAMING_SNAKE_CASE , )
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = RobertaConfig
_UpperCAmelCase = 'roberta'
def __init__( self : List[str] , __snake_case : Optional[Any] ):
super().__init__(__snake_case )
lowerCamelCase :Any = config.num_labels
lowerCamelCase :Union[str, Any] = config.num_hidden_layers
lowerCamelCase :Optional[int] = DeeRobertaModel(__snake_case )
lowerCamelCase :Dict = nn.Dropout(config.hidden_dropout_prob )
lowerCamelCase :Optional[Any] = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(__snake_case )
def snake_case ( self : int , __snake_case : Optional[int]=None , __snake_case : int=None , __snake_case : Optional[Any]=None , __snake_case : Union[str, Any]=None , __snake_case : Any=None , __snake_case : int=None , __snake_case : Union[str, Any]=None , __snake_case : Dict=-1 , __snake_case : Optional[Any]=False , ):
lowerCamelCase :List[str] = self.num_layers
try:
lowerCamelCase :Tuple = self.roberta(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , position_ids=__snake_case , head_mask=__snake_case , inputs_embeds=__snake_case , )
lowerCamelCase :int = outputs[1]
lowerCamelCase :str = self.dropout(__snake_case )
lowerCamelCase :List[Any] = self.classifier(__snake_case )
lowerCamelCase :Optional[int] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
lowerCamelCase :Union[str, Any] = e.message
lowerCamelCase :List[Any] = e.exit_layer
lowerCamelCase :Union[str, Any] = outputs[0]
if not self.training:
lowerCamelCase :List[str] = entropy(__snake_case )
lowerCamelCase :Union[str, Any] = []
lowerCamelCase :Dict = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
lowerCamelCase :List[str] = MSELoss()
lowerCamelCase :Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
lowerCamelCase :List[str] = CrossEntropyLoss()
lowerCamelCase :Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
lowerCamelCase :List[Any] = []
for highway_exit in outputs[-1]:
lowerCamelCase :Optional[int] = highway_exit[0]
if not self.training:
highway_logits_all.append(__snake_case )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
lowerCamelCase :Dict = MSELoss()
lowerCamelCase :int = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
lowerCamelCase :int = CrossEntropyLoss()
lowerCamelCase :List[Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(__snake_case )
if train_highway:
lowerCamelCase :Dict = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
lowerCamelCase :Union[str, Any] = (loss,) + outputs
if not self.training:
lowerCamelCase :List[str] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
lowerCamelCase :Union[str, Any] = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
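# Hedged sketch of how the early exit above is typically triggered at inference
# time (comment-form pseudocode, not part of this module; the real logic lives in
# DeeBertModel / the highway layers):
#
#   for layer_index, layer in enumerate(encoder_layers):
#       hidden_states = layer(hidden_states)
#       highway_logits = highway_classifiers[layer_index](hidden_states)
#       if entropy(highway_logits) < early_exit_entropy[layer_index]:
#           # Aborts the remaining layers; the forward pass above catches this
#           # and reads e.message (the logits) and e.exit_layer.
#           raise HighwayException(highway_logits, layer_index + 1)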
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
A__ = """Hello, World!"""
A__ = """en_XX"""
def _lowerCamelCase ( a_ : str , a_ : str , a_ : bool):
lowerCamelCase :int = Path('''data_bin''')
lowerCamelCase :Union[str, Any] = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(a_).parent) , checkpoint_file=Path(a_).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(a_) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(a_).parent / '''sentencepiece.bpe.model''') , src_dict=str(data_dir / '''dict.txt''') , )
xmod.eval() # disable dropout
print(a_)
lowerCamelCase :Any = xmod.model.encoder.sentence_encoder
lowerCamelCase :List[str] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
lowerCamelCase :Dict = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our X-MOD config:''' , a_)
lowerCamelCase :List[Any] = XmodForSequenceClassification(a_) if classification_head else XmodForMaskedLM(a_)
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCamelCase :Union[str, Any] = xmod_sent_encoder.embed_tokens.weight
lowerCamelCase :Tuple = xmod_sent_encoder.embed_positions.weight
lowerCamelCase :List[str] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight) # just zero them out b/c xmod doesn't use them.
lowerCamelCase :List[Any] = xmod_sent_encoder.layernorm_embedding.weight
lowerCamelCase :Optional[int] = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers):
# Encoder: start of layer
lowerCamelCase :Union[str, Any] = model.roberta.encoder.layer[i]
lowerCamelCase :List[str] = xmod_sent_encoder.layers[i]
# self attention
lowerCamelCase :Optional[int] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size))
):
raise AssertionError('''Dimensions of self-attention weights do not match.''')
lowerCamelCase :Optional[int] = xmod_layer.self_attn.q_proj.weight
lowerCamelCase :List[str] = xmod_layer.self_attn.q_proj.bias
lowerCamelCase :str = xmod_layer.self_attn.k_proj.weight
lowerCamelCase :Optional[Any] = xmod_layer.self_attn.k_proj.bias
lowerCamelCase :Dict = xmod_layer.self_attn.v_proj.weight
lowerCamelCase :Optional[int] = xmod_layer.self_attn.v_proj.bias
# self-attention output
lowerCamelCase :Optional[int] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('''Dimensions of self-attention output weights do not match.''')
lowerCamelCase :List[Any] = xmod_layer.self_attn.out_proj.weight
lowerCamelCase :Union[str, Any] = xmod_layer.self_attn.out_proj.bias
lowerCamelCase :str = xmod_layer.self_attn_layer_norm.weight
lowerCamelCase :List[Any] = xmod_layer.self_attn_layer_norm.bias
# intermediate
lowerCamelCase :Optional[int] = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of intermediate weights do not match.''')
lowerCamelCase :int = xmod_layer.fca.weight
lowerCamelCase :Union[str, Any] = xmod_layer.fca.bias
# output
lowerCamelCase :List[str] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of feed-forward weights do not match.''')
lowerCamelCase :str = xmod_layer.fca.weight
lowerCamelCase :int = xmod_layer.fca.bias
lowerCamelCase :List[Any] = xmod_layer.final_layer_norm.weight
lowerCamelCase :List[str] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowerCamelCase :List[str] = xmod_layer.adapter_layer_norm.weight
lowerCamelCase :int = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
raise AssertionError('''Lists of language adapters do not match.''')
for lang_code, adapter in xmod_layer.adapter_modules.items():
lowerCamelCase :Optional[int] = bert_output.adapter_modules[lang_code]
lowerCamelCase :Dict = xmod_layer.adapter_modules[lang_code]
lowerCamelCase :List[Any] = from_adapter.fca.weight
lowerCamelCase :List[Any] = from_adapter.fca.bias
lowerCamelCase :Dict = from_adapter.fca.weight
lowerCamelCase :Optional[Any] = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowerCamelCase :Dict = xmod_sent_encoder.layer_norm.weight
lowerCamelCase :List[Any] = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowerCamelCase :Union[str, Any] = xmod.model.classification_heads['''mnli'''].dense.weight
lowerCamelCase :Tuple = xmod.model.classification_heads['''mnli'''].dense.bias
lowerCamelCase :Optional[Any] = xmod.model.classification_heads['''mnli'''].out_proj.weight
lowerCamelCase :List[Any] = xmod.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
lowerCamelCase :int = xmod.model.encoder.lm_head.dense.weight
lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.dense.bias
lowerCamelCase :Optional[int] = xmod.model.encoder.lm_head.layer_norm.weight
lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.layer_norm.bias
lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.weight
lowerCamelCase :Any = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCamelCase :str = xmod.encode(a_).unsqueeze(0) # batch of size 1
model.roberta.set_default_language(a_)
lowerCamelCase :Any = model(a_)[0]
if classification_head:
lowerCamelCase :Dict = xmod.model.classification_heads['''mnli'''](xmod.extract_features(a_))
else:
lowerCamelCase :int = xmod.model(a_ , lang_id=[SAMPLE_LANGUAGE])[0]
print(our_output.shape , their_output.shape)
lowerCamelCase :List[str] = torch.max(torch.abs(our_output - their_output)).item()
print(F"max_absolute_diff = {max_absolute_diff}") # ~ 1e-7
lowerCamelCase :str = torch.allclose(a_ , a_ , atol=1e-3)
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''')
if not success:
raise Exception('''Something went wRoNg''')
Path(a_).mkdir(parents=a_ , exist_ok=a_)
print(F"Saving model to {pytorch_dump_folder_path}")
model.save_pretrained(a_)
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
A__ = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
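# Example invocation (the script filename and both paths are placeholders):
#   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#       --xmod_checkpoint_path /path/to/checkpoint.pt \
#       --pytorch_dump_folder_path ./converted-xmod \
#       --classification_head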
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
A__ : Optional[int] = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
A__ : int = 250_004
A__ : Optional[Any] = 250_020
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = MBartaaTokenizer
_UpperCAmelCase = MBartaaTokenizerFast
_UpperCAmelCase = True
_UpperCAmelCase = True
def snake_case ( self : Union[str, Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase :Optional[int] = MBartaaTokenizer(__snake_case , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case ( self : Dict ):
lowerCamelCase :int = '''<s>'''
lowerCamelCase :int = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) , __snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) , __snake_case )
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(__snake_case ) , 1054 )
def snake_case ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 1054 )
def snake_case ( self : List[Any] ):
lowerCamelCase :List[Any] = MBartaaTokenizer(__snake_case , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=__snake_case )
lowerCamelCase :List[str] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCamelCase :List[str] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__snake_case , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
lowerCamelCase :Tuple = tokenizer.convert_tokens_to_ids(__snake_case )
self.assertListEqual(
__snake_case , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowerCamelCase :str = tokenizer.convert_ids_to_tokens(__snake_case )
self.assertListEqual(
__snake_case , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
def snake_case ( self : Union[str, Any] ):
# fmt: off
lowerCamelCase :Optional[Any] = {'''input_ids''': [[250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , )
def snake_case ( self : Optional[Any] ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCamelCase :Optional[Any] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase :Tuple = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :Tuple = self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :int = tempfile.mkdtemp()
lowerCamelCase :Union[str, Any] = tokenizer_r.save_pretrained(__snake_case )
lowerCamelCase :List[Any] = tokenizer_p.save_pretrained(__snake_case )
                # Check that it saves the same files, plus the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
lowerCamelCase :Union[str, Any] = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(__snake_case , __snake_case )
# Checks everything loads correctly in the same way
lowerCamelCase :Any = tokenizer_r.from_pretrained(__snake_case )
lowerCamelCase :Any = tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=True
lowerCamelCase :Union[str, Any] = tempfile.mkdtemp()
lowerCamelCase :List[Any] = tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
lowerCamelCase :List[str] = tokenizer_p.save_pretrained(__snake_case )
                # Check that it saves the same files
self.assertSequenceEqual(__snake_case , __snake_case )
# Checks everything loads correctly in the same way
lowerCamelCase :Union[str, Any] = tokenizer_r.from_pretrained(__snake_case )
lowerCamelCase :Tuple = tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
# Save tokenizer rust, legacy_format=False
lowerCamelCase :Optional[Any] = tempfile.mkdtemp()
lowerCamelCase :Optional[Any] = tokenizer_r.save_pretrained(__snake_case , legacy_format=__snake_case )
lowerCamelCase :Optional[Any] = tokenizer_p.save_pretrained(__snake_case )
                # Check that it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCamelCase :Dict = tokenizer_r.from_pretrained(__snake_case )
lowerCamelCase :List[Any] = tokenizer_p.from_pretrained(__snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__snake_case , __snake_case ) )
shutil.rmtree(__snake_case )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
_UpperCAmelCase = 'facebook/mbart-large-50-one-to-many-mmt'
_UpperCAmelCase = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
_UpperCAmelCase = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
_UpperCAmelCase = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2]
@classmethod
def snake_case ( cls : Any ):
lowerCamelCase :MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
lowerCamelCase :str = 1
return cls
def snake_case ( self : Union[str, Any] ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 250020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 250038 )
def snake_case ( self : Optional[int] ):
lowerCamelCase :Dict = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __snake_case )
def snake_case ( self : Optional[int] ):
self.assertIn(__snake_case , self.tokenizer.all_special_ids )
lowerCamelCase :Tuple = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
lowerCamelCase :List[str] = self.tokenizer.decode(__snake_case , skip_special_tokens=__snake_case )
lowerCamelCase :Any = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__snake_case )
self.assertEqual(__snake_case , __snake_case )
self.assertNotIn(self.tokenizer.eos_token , __snake_case )
def snake_case ( self : Tuple ):
lowerCamelCase :Any = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , __snake_case )
lowerCamelCase :Any = 10
lowerCamelCase :List[str] = self.tokenizer(__snake_case , max_length=__snake_case , truncation=__snake_case ).input_ids[0]
self.assertEqual(ids[0] , __snake_case )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(__snake_case ) , __snake_case )
def snake_case ( self : Dict ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [250053, 250001] )
def snake_case ( self : Any ):
lowerCamelCase :Any = tempfile.mkdtemp()
lowerCamelCase :List[Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__snake_case )
lowerCamelCase :List[str] = MBartaaTokenizer.from_pretrained(__snake_case )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __snake_case )
@require_torch
def snake_case ( self : List[Any] ):
lowerCamelCase :Dict = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__snake_case , return_tensors='''pt''' )
lowerCamelCase :Optional[Any] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Union[str, Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__snake_case , truncation=__snake_case , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
lowerCamelCase :Any = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
lowerCamelCase :Union[str, Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __snake_case )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :int = self.tokenizer(self.src_text , padding=__snake_case , truncation=__snake_case , max_length=3 , return_tensors='''pt''' )
lowerCamelCase :Tuple = self.tokenizer(
text_target=self.tgt_text , padding=__snake_case , truncation=__snake_case , max_length=10 , return_tensors='''pt''' )
lowerCamelCase :Optional[Any] = targets['''input_ids''']
lowerCamelCase :List[str] = shift_tokens_right(__snake_case , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def snake_case ( self : List[str] ):
lowerCamelCase :Tuple = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(__snake_case ) , {
# en_XX, A, test, EOS
'''input_ids''': [[250004, 62, 3034, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 250001,
} , )
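# Hedged usage sketch, separate from the tests above: en_XX -> ro_RO translation
# with the checkpoint exercised by this class. `lang_code_to_id` and
# `forced_bos_token_id` follow the public mBART-50 API; the generation call itself
# is an illustrative assumption.
from transformers import MBart50TokenizerFast, MBartForConditionalGeneration

tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX")
model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-one-to-many-mmt")
batch = tokenizer(" UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
generated_tokens = model.generate(**batch, forced_bos_token_id=tokenizer.lang_code_to_id["ro_RO"])
print(tokenizer.batch_decode(generated_tokens, skip_special_tokens=True))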
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'roberta-prelayernorm'
def __init__( self : str , __snake_case : List[str]=50265 , __snake_case : Union[str, Any]=768 , __snake_case : Tuple=12 , __snake_case : int=12 , __snake_case : Any=3072 , __snake_case : Optional[int]="gelu" , __snake_case : List[Any]=0.1 , __snake_case : int=0.1 , __snake_case : Union[str, Any]=512 , __snake_case : Dict=2 , __snake_case : int=0.0_2 , __snake_case : Any=1e-1_2 , __snake_case : Optional[int]=1 , __snake_case : Dict=0 , __snake_case : Optional[int]=2 , __snake_case : Any="absolute" , __snake_case : Union[str, Any]=True , __snake_case : List[str]=None , **__snake_case : Optional[int] , ):
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
lowerCamelCase :Optional[int] = vocab_size
lowerCamelCase :Dict = hidden_size
lowerCamelCase :Tuple = num_hidden_layers
lowerCamelCase :Optional[int] = num_attention_heads
lowerCamelCase :Any = hidden_act
lowerCamelCase :List[Any] = intermediate_size
lowerCamelCase :Union[str, Any] = hidden_dropout_prob
lowerCamelCase :str = attention_probs_dropout_prob
lowerCamelCase :Tuple = max_position_embeddings
lowerCamelCase :int = type_vocab_size
lowerCamelCase :Optional[Any] = initializer_range
lowerCamelCase :Union[str, Any] = layer_norm_eps
lowerCamelCase :Dict = position_embedding_type
lowerCamelCase :List[Any] = use_cache
lowerCamelCase :Optional[int] = classifier_dropout
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
@property
def snake_case ( self : Any ):
if self.task == "multiple-choice":
lowerCamelCase :Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCamelCase :List[str] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
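# Hedged usage sketch (not part of the module above): instantiating the config
# with a few overridden hyperparameters. The class name follows the public
# transformers API for this model type; the values are arbitrary.
from transformers import RobertaPreLayerNormConfig

config = RobertaPreLayerNormConfig(hidden_size=256, num_hidden_layers=4)
print(config.model_type)  # "roberta-prelayernorm"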
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def _lowerCamelCase ( a_ : Any):
lowerCamelCase :Any = int(a_)
lowerCamelCase :Optional[int] = t // 36_00, (t // 60) % 60, t % 60
return F"{h}:{m:02d}:{s:02d}" if h != 0 else F"{m:02d}:{s:02d}"
def _lowerCamelCase ( a_ : Any , a_ : Union[str, Any] , a_ : Dict , a_ : List[str] , a_ : List[str]=3_00):
# docstyle-ignore
return F"\n <div>\n {prefix}\n <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>\n {label}\n </div>\n "
def _lowerCamelCase ( a_ : int):
lowerCamelCase :int = '''<table border="1" class="dataframe">\n'''
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += F" <th>{i}</th>\n"
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
lowerCamelCase :List[str] = F"{elt:.6f}" if isinstance(a_ , a_) else str(a_)
html_code += F" <td>{elt}</td>\n"
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class _lowerCAmelCase :
_UpperCAmelCase = 5
_UpperCAmelCase = 0.2
def __init__( self : Union[str, Any] , __snake_case : int , __snake_case : Optional[str] = None , __snake_case : bool = True , __snake_case : Optional["NotebookTrainingTracker"] = None , __snake_case : int = 300 , ):
lowerCamelCase :Dict = total
lowerCamelCase :Tuple = '''''' if prefix is None else prefix
lowerCamelCase :Tuple = leave
lowerCamelCase :List[Any] = parent
lowerCamelCase :int = width
lowerCamelCase :Optional[Any] = None
lowerCamelCase :str = None
lowerCamelCase :List[Any] = None
def snake_case ( self : Optional[int] , __snake_case : int , __snake_case : bool = False , __snake_case : str = None ):
lowerCamelCase :Tuple = value
if comment is not None:
lowerCamelCase :int = comment
if self.last_value is None:
lowerCamelCase :Dict = time.time()
lowerCamelCase :List[str] = value
lowerCamelCase :Dict = None
lowerCamelCase :Tuple = self.warmup
lowerCamelCase :Dict = 1
self.update_bar(__snake_case )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
lowerCamelCase :Dict = time.time()
lowerCamelCase :int = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
lowerCamelCase :Optional[int] = self.elapsed_time / (value - self.start_value)
else:
lowerCamelCase :Optional[int] = None
if value >= self.total:
lowerCamelCase :List[str] = self.total
lowerCamelCase :List[str] = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
lowerCamelCase :List[str] = self.average_time_per_item * (self.total - value)
self.update_bar(__snake_case )
lowerCamelCase :Optional[Any] = value
lowerCamelCase :Tuple = current_time
if self.average_time_per_item is None:
lowerCamelCase :Optional[int] = 1
else:
lowerCamelCase :Union[str, Any] = max(int(self.update_every / self.average_time_per_item ) , 1 )
def snake_case ( self : Optional[Any] , __snake_case : Tuple , __snake_case : List[Any]=None ):
lowerCamelCase :List[Any] = ''' ''' * (len(str(self.total ) ) - len(str(__snake_case ) )) + str(__snake_case )
if self.elapsed_time is None:
lowerCamelCase :str = F"[{spaced_value}/{self.total} : < :"
elif self.predicted_remaining is None:
lowerCamelCase :Optional[Any] = F"[{spaced_value}/{self.total} {format_time(self.elapsed_time )}"
else:
lowerCamelCase :str = (
F"[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <"
F" {format_time(self.predicted_remaining )}"
)
self.label += F", {1/self.average_time_per_item:.2f} it/s"
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F", {self.comment}]"
self.display()
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Dict = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
lowerCamelCase :Union[str, Any] = disp.display(disp.HTML(self.html_code ) , display_id=__snake_case )
else:
self.output.update(disp.HTML(self.html_code ) )
def snake_case ( self : str ):
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('''''' ) )
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self : int , __snake_case : Optional[int] , __snake_case : List[Any]=None ):
super().__init__(__snake_case )
lowerCamelCase :List[Any] = None if column_names is None else [column_names]
lowerCamelCase :Optional[int] = None
def snake_case ( self : List[Any] ):
lowerCamelCase :int = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
lowerCamelCase :List[Any] = disp.display(disp.HTML(self.html_code ) , display_id=__snake_case )
else:
self.output.update(disp.HTML(self.html_code ) )
def snake_case ( self : Union[str, Any] , __snake_case : Tuple ):
if self.inner_table is None:
lowerCamelCase :Union[str, Any] = [list(values.keys() ), list(values.values() )]
else:
lowerCamelCase :Optional[int] = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(__snake_case )
lowerCamelCase :List[Any] = columns
self.inner_table.append([values[c] for c in columns] )
def snake_case ( self : Union[str, Any] , __snake_case : List[str] , __snake_case : List[Any]=None , __snake_case : Tuple=300 ):
lowerCamelCase :Any = NotebookProgressBar(__snake_case , prefix=__snake_case , parent=self , width=__snake_case )
return self.child_bar
def snake_case ( self : Optional[Any] ):
lowerCamelCase :int = None
self.display()
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self : Any ):
lowerCamelCase :Optional[Any] = None
lowerCamelCase :int = None
lowerCamelCase :Optional[int] = False
def snake_case ( self : Any , __snake_case : Tuple , __snake_case : Optional[int] , __snake_case : Any , **__snake_case : Optional[int] ):
lowerCamelCase :int = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
lowerCamelCase :List[Any] = 0
lowerCamelCase :List[str] = 0
lowerCamelCase :Tuple = [self.first_column] + ['''Training Loss''']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('''Validation Loss''' )
lowerCamelCase :Optional[Any] = NotebookTrainingTracker(state.max_steps , __snake_case )
def snake_case ( self : Optional[Any] , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Dict , **__snake_case : Any ):
lowerCamelCase :List[Any] = int(state.epoch ) if int(state.epoch ) == state.epoch else F"{state.epoch:.2f}"
self.training_tracker.update(
state.global_step + 1 , comment=F"Epoch {epoch}/{state.num_train_epochs}" , force_update=self._force_next_update , )
lowerCamelCase :Optional[int] = False
def snake_case ( self : Tuple , __snake_case : Tuple , __snake_case : List[Any] , __snake_case : str , __snake_case : int=None , **__snake_case : Optional[Any] ):
if not has_length(__snake_case ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
lowerCamelCase :List[str] = self.training_tracker.add_child(len(__snake_case ) )
else:
lowerCamelCase :str = NotebookProgressBar(len(__snake_case ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def snake_case ( self : List[str] , __snake_case : Dict , __snake_case : Any , __snake_case : str , **__snake_case : List[Any] ):
if self.prediction_bar is not None:
self.prediction_bar.close()
lowerCamelCase :List[str] = None
def snake_case ( self : int , __snake_case : str , __snake_case : int , __snake_case : str , __snake_case : Any=None , **__snake_case : Tuple ):
# Only for when there is no evaluation
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
lowerCamelCase :Tuple = {'''Training Loss''': logs['''loss''']}
            # First column is necessarily Step since we're not in the epoch eval strategy
lowerCamelCase :Dict = state.global_step
self.training_tracker.write_line(__snake_case )
def snake_case ( self : int , __snake_case : List[str] , __snake_case : List[str] , __snake_case : str , __snake_case : Union[str, Any]=None , **__snake_case : str ):
if self.training_tracker is not None:
lowerCamelCase :Optional[Any] = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
for log in reversed(state.log_history ):
if "loss" in log:
lowerCamelCase :Any = log['''loss''']
break
if self.first_column == "Epoch":
lowerCamelCase :Tuple = int(state.epoch )
else:
lowerCamelCase :Optional[int] = state.global_step
lowerCamelCase :Tuple = '''eval'''
for k in metrics:
if k.endswith('''_loss''' ):
lowerCamelCase :Union[str, Any] = re.sub(R'''\_loss$''' , '''''' , __snake_case )
lowerCamelCase :List[Any] = metrics.pop('''total_flos''' , __snake_case )
lowerCamelCase :Union[str, Any] = metrics.pop('''epoch''' , __snake_case )
lowerCamelCase :List[Any] = metrics.pop(F"{metric_key_prefix}_runtime" , __snake_case )
lowerCamelCase :Dict = metrics.pop(F"{metric_key_prefix}_samples_per_second" , __snake_case )
lowerCamelCase :List[Any] = metrics.pop(F"{metric_key_prefix}_steps_per_second" , __snake_case )
lowerCamelCase :List[str] = metrics.pop(F"{metric_key_prefix}_jit_compilation_time" , __snake_case )
for k, v in metrics.items():
if k == F"{metric_key_prefix}_loss":
lowerCamelCase :List[str] = v
else:
lowerCamelCase :Any = k.split('''_''' )
lowerCamelCase :Optional[Any] = ''' '''.join([part.capitalize() for part in splits[1:]] )
lowerCamelCase :Union[str, Any] = v
self.training_tracker.write_line(__snake_case )
self.training_tracker.remove_child()
lowerCamelCase :Dict = None
# Evaluation takes a long time so we should force the next update.
lowerCamelCase :List[Any] = True
def snake_case ( self : Optional[int] , __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : Tuple , **__snake_case : Tuple ):
self.training_tracker.update(
state.global_step , comment=F"Epoch {int(state.epoch )}/{state.num_train_epochs}" , force_update=__snake_case )
lowerCamelCase :Any = None
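# Hedged sketch: driving NotebookProgressBar by hand in a notebook (normally
# NotebookProgressCallback manages these bars; the loop below only illustrates
# the update() contract and assumes an IPython display is available):
#
#   bar = NotebookProgressBar(100, prefix="Demo")
#   for step in range(1, 101):
#       bar.update(step, comment=f"step {step}")
#   bar.close()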
| 700
|
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = DebertaTokenizer
_UpperCAmelCase = True
_UpperCAmelCase = DebertaTokenizerFast
def snake_case ( self : List[str] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase :Dict = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
lowerCamelCase :List[str] = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
lowerCamelCase :Any = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCamelCase :Dict = {'''unk_token''': '''[UNK]'''}
lowerCamelCase :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__snake_case ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__snake_case ) )
def snake_case ( self : str , **__snake_case : Dict ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Optional[Any] , __snake_case : int ):
lowerCamelCase :List[Any] = '''lower newer'''
lowerCamelCase :List[str] = '''lower newer'''
return input_text, output_text
def snake_case ( self : str ):
lowerCamelCase :Optional[int] = self.get_tokenizer()
lowerCamelCase :Union[str, Any] = '''lower newer'''
lowerCamelCase :str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
lowerCamelCase :Optional[int] = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
lowerCamelCase :List[str] = tokens + [tokenizer.unk_token]
lowerCamelCase :Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def snake_case ( self : Optional[int] ):
lowerCamelCase :List[str] = self.get_tokenizer()
lowerCamelCase :Optional[int] = tokenizer('''Hello''' , '''World''' )
lowerCamelCase :List[str] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['''token_type_ids'''] , __snake_case )
@slow
def snake_case ( self : str ):
lowerCamelCase :Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
lowerCamelCase :Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__snake_case )
lowerCamelCase :Tuple = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__snake_case )
lowerCamelCase :Union[str, Any] = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
lowerCamelCase :str = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
lowerCamelCase :Any = tokenizer.build_inputs_with_special_tokens(__snake_case )
lowerCamelCase :Dict = tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def snake_case ( self : str ):
lowerCamelCase :List[str] = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
lowerCamelCase :int = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
lowerCamelCase :Tuple = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
lowerCamelCase :List[Any] = tokenizer(__snake_case , padding=__snake_case )
lowerCamelCase :Union[str, Any] = [tokenizer.decode(__snake_case , skip_special_tokens=__snake_case ) for seq in encoding['''input_ids''']]
# fmt: off
lowerCamelCase :Any = {
'''input_ids''': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
lowerCamelCase :Optional[int] = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , __snake_case )
for expected, decoded in zip(__snake_case , __snake_case ):
self.assertEqual(__snake_case , __snake_case )
from __future__ import annotations
def _lowerCamelCase ( a_ : int):
lowerCamelCase :Union[str, Any] = [True] * limit
lowerCamelCase :Tuple = False
lowerCamelCase :List[str] = False
lowerCamelCase :List[Any] = True
for i in range(3 , int(limit**0.5 + 1) , 2):
lowerCamelCase :List[Any] = i * 2
while index < limit:
lowerCamelCase :Union[str, Any] = False
lowerCamelCase :List[Any] = index + i
lowerCamelCase :List[str] = [2]
for i in range(3 , a_ , 2):
if is_prime[i]:
primes.append(a_)
return primes
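# Hedged sanity check (illustrative, not part of the original module): only odd
# candidates plus 2 survive the sieve above, so prime_sieve(10) yields [2, 3, 5, 7].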
def _lowerCamelCase ( a_ : int = 1_00_00_00):
lowerCamelCase :Tuple = prime_sieve(a_)
lowerCamelCase :str = 0
lowerCamelCase :Dict = 0
for i in range(len(a_)):
for j in range(i + length , len(a_)):
lowerCamelCase :Dict = sum(primes[i:j])
if sol >= ceiling:
break
if sol in primes:
lowerCamelCase :str = j - i
lowerCamelCase :Optional[int] = sol
return largest
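# The scan above is Project Euler problem 50: find the prime below the ceiling
# expressible as the sum of the most consecutive primes. Each inner window starts
# at i + length, so only runs longer than the best found so far are considered;
# for a ceiling of one million the answer is 997651.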
if __name__ == "__main__":
print(F'{solution() = }')
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
A__ = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Any , __snake_case : str , __snake_case : bool , __snake_case : str = None , __snake_case : list = None ):
lowerCamelCase :Tuple = None
lowerCamelCase :Tuple = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
lowerCamelCase :Optional[int] = os.path.abspath('''examples''' )
for item in os.listdir(__snake_case ):
if item not in EXCLUDE_EXAMPLES:
lowerCamelCase :Optional[int] = os.path.join(__snake_case , __snake_case )
if os.path.isfile(__snake_case ) and ".py" in item_path:
with self.subTest(
tested_script=__snake_case , feature_script=__snake_case , tested_section='''main()''' if parser_only else '''training_function()''' , ):
lowerCamelCase :Union[str, Any] = compare_against_test(
os.path.join(__snake_case , __snake_case ) , __snake_case , __snake_case , __snake_case )
lowerCamelCase :int = '''\n'''.join(__snake_case )
if special_strings is not None:
for string in special_strings:
lowerCamelCase :int = diff.replace(__snake_case , '''''' )
self.assertEqual(__snake_case , '''''' )
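# Note: the loop above diffs every feature script against its full example via
# compare_against_test(); after stripping the expected `special_strings`, the
# remaining diff must be empty, so any unintended divergence fails the subtest.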
def snake_case ( self : Dict ):
self.one_complete_example('''complete_nlp_example.py''' , __snake_case )
self.one_complete_example('''complete_nlp_example.py''' , __snake_case )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Union[str, Any] = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
lowerCamelCase :Optional[int] = [
''' ''' * 16 + '''{\n\n''',
''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 20 + '''"epoch": epoch,\n\n''',
''' ''' * 16 + '''},\n\n''',
''' ''' * 16 + '''step=epoch,\n''',
''' ''' * 12,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , __snake_case , __snake_case , __snake_case )
self.one_complete_example('''complete_cv_example.py''' , __snake_case , __snake_case , __snake_case )
@mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} )
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = False
@classmethod
def snake_case ( cls : Optional[Any] ):
super().setUpClass()
lowerCamelCase :Any = tempfile.mkdtemp()
lowerCamelCase :Optional[int] = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
lowerCamelCase :List[str] = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def snake_case ( cls : Dict ):
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def snake_case ( self : int ):
lowerCamelCase :Any = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def snake_case ( self : List[Any] ):
lowerCamelCase :Tuple = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n ".split()
lowerCamelCase :List[Any] = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def snake_case ( self : List[str] ):
lowerCamelCase :Dict = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}\n ".split()
lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case )
self.assertNotIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
def snake_case ( self : str ):
lowerCamelCase :List[Any] = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}\n ".split()
lowerCamelCase :Optional[int] = run_command(self._launch_args + testargs , return_stdout=__snake_case )
if torch.cuda.is_available():
lowerCamelCase :Union[str, Any] = torch.cuda.device_count()
else:
lowerCamelCase :Dict = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
else:
self.assertIn('''epoch 0:''' , __snake_case )
self.assertIn('''epoch 1:''' , __snake_case )
@slow
def snake_case ( self : Any ):
lowerCamelCase :Tuple = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
lowerCamelCase :Dict = run_command(self._launch_args + testargs , return_stdout=__snake_case )
lowerCamelCase :Tuple = re.findall('''({.+})''' , __snake_case )
lowerCamelCase :Optional[Any] = [r for r in results if '''accuracy''' in r][-1]
lowerCamelCase :List[str] = ast.literal_eval(__snake_case )
self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 )
def snake_case ( self : int ):
lowerCamelCase :Dict = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case ( self : Any ):
with tempfile.TemporaryDirectory() as tmpdir:
lowerCamelCase :Tuple = F"\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__snake_case , '''tracking''' ) ) )
def snake_case ( self : Tuple ):
lowerCamelCase :Tuple = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :int = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
A__ = logging.get_logger(__name__)
A__ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
A__ = {
"""tokenizer_file""": {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json""",
},
}
A__ = {
"""gpt-neox-20b""": 2_048,
}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = ['input_ids', 'attention_mask']
def __init__( self : int , __snake_case : Optional[Any]=None , __snake_case : int=None , __snake_case : Tuple=None , __snake_case : str="<|endoftext|>" , __snake_case : Dict="<|endoftext|>" , __snake_case : Optional[int]="<|endoftext|>" , __snake_case : Any=False , **__snake_case : Optional[int] , ):
super().__init__(
__snake_case , __snake_case , tokenizer_file=__snake_case , unk_token=__snake_case , bos_token=__snake_case , eos_token=__snake_case , add_prefix_space=__snake_case , **__snake_case , )
lowerCamelCase :List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __snake_case ) != add_prefix_space:
lowerCamelCase :int = getattr(__snake_case , pre_tok_state.pop('''type''' ) )
lowerCamelCase :str = add_prefix_space
lowerCamelCase :str = pre_tok_class(**__snake_case )
lowerCamelCase :Optional[int] = add_prefix_space
def snake_case ( self : List[str] , __snake_case : str , __snake_case : Optional[str] = None ):
lowerCamelCase :Any = self._tokenizer.model.save(__snake_case , name=__snake_case )
return tuple(__snake_case )
def snake_case ( self : Tuple , __snake_case : "Conversation" ):
lowerCamelCase :Any = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__snake_case , add_special_tokens=__snake_case ) + [self.eos_token_id] )
if len(__snake_case ) > self.model_max_length:
lowerCamelCase :Optional[Any] = input_ids[-self.model_max_length :]
return input_ids
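# Hedged usage sketch (not part of the original file; the class above is
# GPTNeoXTokenizerFast upstream, and the call assumes Hugging Face Hub access):
# tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
# tokenizer("Hello world").input_ids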
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
A__ = imread(R"""digital_image_processing/image_data/lena_small.jpg""")
A__ = cvtColor(img, COLOR_BGR2GRAY)
def _lowerCamelCase ( ):
lowerCamelCase :int = cn.convert_to_negative(a_)
# the negative image array should contain at least one non-zero value
assert negative_img.any()
def _lowerCamelCase ( ):
with Image.open('''digital_image_processing/image_data/lena_small.jpg''') as img:
# Work around assertion for response
assert str(cc.change_contrast(a_ , 1_10)).startswith(
'''<PIL.Image.Image image mode=RGB size=100x100 at''')
def _lowerCamelCase ( ):
lowerCamelCase :Optional[Any] = canny.gen_gaussian_kernel(9 , sigma=1.4)
# every entry of the generated Gaussian kernel should be non-zero
assert resp.all()
def _lowerCamelCase ( ):
lowerCamelCase :str = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0)
# `.all()` sidesteps numpy's ambiguous truth value and checks every pixel is non-zero
assert canny_img.all()
lowerCamelCase :Optional[Any] = canny.canny(a_)
# assert canny array for at least one True
assert canny_array.any()
def _lowerCamelCase ( ):
assert gg.gaussian_filter(a_ , 5 , sigma=0.9).all()
def _lowerCamelCase ( ):
# laplace diagonals
lowerCamelCase :List[Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
lowerCamelCase :List[Any] = conv.img_convolve(a_ , a_).astype(a_)
assert res.any()
def _lowerCamelCase ( ):
assert med.median_filter(a_ , 3).any()
def _lowerCamelCase ( ):
lowerCamelCase , lowerCamelCase :Union[str, Any] = sob.sobel_filter(a_)
assert grad.any() and theta.any()
def _lowerCamelCase ( ):
lowerCamelCase :Dict = sp.make_sepia(a_ , 20)
assert sepia.all()
def _lowerCamelCase ( a_ : str = "digital_image_processing/image_data/lena_small.jpg"):
lowerCamelCase :Any = bs.Burkes(imread(a_ , 1) , 1_20)
burkes.process()
assert burkes.output_img.any()
def _lowerCamelCase ( a_ : str = "digital_image_processing/image_data/lena_small.jpg" , ):
lowerCamelCase :Tuple = rs.NearestNeighbour(imread(a_ , 1) , 4_00 , 2_00)
nn.process()
assert nn.output.any()
def _lowerCamelCase ( ):
lowerCamelCase :Tuple = '''digital_image_processing/image_data/lena.jpg'''
# Reading the image and converting it to grayscale.
lowerCamelCase :Tuple = imread(a_ , 0)
# Test for get_neighbors_pixel function() return not None
lowerCamelCase :Dict = 0
lowerCamelCase :Optional[Any] = 0
lowerCamelCase :str = image[x_coordinate][y_coordinate]
lowerCamelCase :Any = lbp.get_neighbors_pixel(
a_ , a_ , a_ , a_)
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
lowerCamelCase :int = np.zeros((image.shape[0], image.shape[1]))
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0]):
for j in range(0 , image.shape[1]):
lowerCamelCase :Optional[int] = lbp.local_binary_value(a_ , a_ , a_)
assert lbp_image.any()
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 4_2
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
@register_to_config
def __init__( self : Union[str, Any] , __snake_case : int = 3 , __snake_case : int = 3 , __snake_case : Tuple[str] = ("DownEncoderBlock2D",) , __snake_case : Tuple[str] = ("UpDecoderBlock2D",) , __snake_case : Tuple[int] = (64,) , __snake_case : int = 1 , __snake_case : str = "silu" , __snake_case : int = 3 , __snake_case : int = 32 , __snake_case : int = 256 , __snake_case : int = 32 , __snake_case : Optional[int] = None , __snake_case : float = 0.1_8_2_1_5 , __snake_case : str = "group" , ):
super().__init__()
# pass init params to Encoder
lowerCamelCase :Tuple = Encoder(
in_channels=__snake_case , out_channels=__snake_case , down_block_types=__snake_case , block_out_channels=__snake_case , layers_per_block=__snake_case , act_fn=__snake_case , norm_num_groups=__snake_case , double_z=__snake_case , )
lowerCamelCase :str = vq_embed_dim if vq_embed_dim is not None else latent_channels
lowerCamelCase :List[Any] = nn.Conv2d(__snake_case , __snake_case , 1 )
lowerCamelCase :str = VectorQuantizer(__snake_case , __snake_case , beta=0.2_5 , remap=__snake_case , sane_index_shape=__snake_case )
lowerCamelCase :List[str] = nn.Conv2d(__snake_case , __snake_case , 1 )
# pass init params to Decoder
lowerCamelCase :str = Decoder(
in_channels=__snake_case , out_channels=__snake_case , up_block_types=__snake_case , block_out_channels=__snake_case , layers_per_block=__snake_case , act_fn=__snake_case , norm_num_groups=__snake_case , norm_type=__snake_case , )
@apply_forward_hook
def snake_case ( self : Tuple , __snake_case : torch.FloatTensor , __snake_case : bool = True ):
lowerCamelCase :str = self.encoder(__snake_case )
lowerCamelCase :int = self.quant_conv(__snake_case )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=__snake_case )
@apply_forward_hook
def snake_case ( self : Tuple , __snake_case : torch.FloatTensor , __snake_case : bool = False , __snake_case : bool = True ):
# also go through quantization layer
if not force_not_quantize:
lowerCamelCase :List[str] = self.quantize(__snake_case )
else:
lowerCamelCase :Any = h
lowerCamelCase :List[str] = self.post_quant_conv(__snake_case )
lowerCamelCase :int = self.decoder(__snake_case , quant if self.config.norm_type == '''spatial''' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__snake_case )
def snake_case ( self : str , __snake_case : torch.FloatTensor , __snake_case : bool = True ):
lowerCamelCase :Union[str, Any] = sample
lowerCamelCase :Optional[int] = self.encode(__snake_case ).latents
lowerCamelCase :List[Any] = self.decode(__snake_case ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=__snake_case )
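# forward() above round-trips a sample: encode to latents, then decode; the
# decode path snaps latents to the nearest codebook entries via the
# VectorQuantizer unless force_not_quantize is set.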
import os
from math import log10
def _lowerCamelCase ( a_ : str = "base_exp.txt"):
lowerCamelCase :float = 0
lowerCamelCase :Optional[int] = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(a_) , a_))):
lowerCamelCase , lowerCamelCase :Optional[int] = list(map(a_ , line.split(''',''')))
if x * log10(a_) > largest:
lowerCamelCase :List[Any] = x * log10(a_)
lowerCamelCase :Any = i + 1
return result
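# Computing base**exponent directly would be astronomically large; because log10
# is monotonic, comparing exponent * log10(base) preserves the ordering. For
# example, 2**100 vs 10**30: 100 * log10(2) ~ 30.1 > 30 * log10(10) = 30, so
# 2**100 is the larger value.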
if __name__ == "__main__":
print(solution())
'''simple docstring'''
from __future__ import annotations
def _lowerCamelCase ( a_ : list[int]): # This function is recursive
lowerCamelCase :Union[str, Any] = len(a_)
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
lowerCamelCase :Optional[Any] = array[0]
lowerCamelCase :Any = False
lowerCamelCase :Tuple = 1
lowerCamelCase :list[int] = []
while not is_found and i < array_length:
if array[i] < pivot:
lowerCamelCase :Any = True
lowerCamelCase :Optional[int] = [element for element in array[i:] if element >= array[i]]
lowerCamelCase :List[Any] = longest_subsequence(a_)
if len(a_) > len(a_):
lowerCamelCase :int = temp_array
else:
i += 1
lowerCamelCase :Optional[int] = [element for element in array[1:] if element >= pivot]
lowerCamelCase :Union[str, Any] = [pivot, *longest_subsequence(a_)]
if len(a_) > len(a_):
return temp_array
else:
return longest_subseq
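# Traced example (hedged, matching the recursion above):
# longest_subsequence([3, 1, 2]) -> [1, 2], because dropping the pivot 3 and
# recursing on the tail [1, 2] beats the pivot-anchored candidate [3].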
if __name__ == "__main__":
import doctest
doctest.testmod()
def _lowerCamelCase ( a_ : list):
if not grid or not grid[0]:
raise TypeError('''The grid does not contain the appropriate information''')
for cell_n in range(1 , len(grid[0])):
grid[0][cell_n] += grid[0][cell_n - 1]
lowerCamelCase :Any = grid[0]
for row_n in range(1 , len(a_)):
lowerCamelCase :List[str] = grid[row_n]
lowerCamelCase :Union[str, Any] = fill_row(a_ , a_)
lowerCamelCase :List[Any] = grid[row_n]
return grid[-1][-1]
def _lowerCamelCase ( a_ : list , a_ : list):
current_row[0] += row_above[0]
for cell_n in range(1 , len(a_)):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n])
return current_row
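# Worked example (illustrative): for grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
# the rows accumulate to [1, 4, 5], [2, 7, 6], [6, 8, 7], so the cheapest
# right/down path from top-left to bottom-right costs 7 (1 -> 3 -> 1 -> 1 -> 1).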
if __name__ == "__main__":
import doctest
doctest.testmod()
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class _lowerCAmelCase :
def __init__( self : Optional[Any] , __snake_case : Tuple , __snake_case : Union[str, Any]=13 , __snake_case : int=10 , __snake_case : Optional[Any]=3 , __snake_case : Union[str, Any]=2 , __snake_case : Tuple=2 , __snake_case : Union[str, Any]=2 , __snake_case : int=True , __snake_case : Dict=True , __snake_case : Optional[int]=32 , __snake_case : int=5 , __snake_case : List[Any]=4 , __snake_case : Union[str, Any]=37 , __snake_case : List[str]="gelu" , __snake_case : int=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : str=10 , __snake_case : Dict=0.0_2 , __snake_case : Union[str, Any]=0.9 , __snake_case : Any=None , ):
lowerCamelCase :Dict = parent
lowerCamelCase :List[Any] = batch_size
lowerCamelCase :str = image_size
lowerCamelCase :int = num_channels
lowerCamelCase :Any = patch_size
lowerCamelCase :Tuple = tubelet_size
lowerCamelCase :int = num_frames
lowerCamelCase :Any = is_training
lowerCamelCase :List[str] = use_labels
lowerCamelCase :Dict = hidden_size
lowerCamelCase :int = num_hidden_layers
lowerCamelCase :List[str] = num_attention_heads
lowerCamelCase :Optional[int] = intermediate_size
lowerCamelCase :Union[str, Any] = hidden_act
lowerCamelCase :Dict = hidden_dropout_prob
lowerCamelCase :Tuple = attention_probs_dropout_prob
lowerCamelCase :List[Any] = type_sequence_label_size
lowerCamelCase :List[Any] = initializer_range
lowerCamelCase :Any = mask_ratio
lowerCamelCase :Optional[Any] = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
lowerCamelCase :Optional[Any] = (image_size // patch_size) ** 2
lowerCamelCase :List[Any] = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
lowerCamelCase :Optional[Any] = int(mask_ratio * self.seq_length )
def snake_case ( self : Any ):
lowerCamelCase :str = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase :Any = None
if self.use_labels:
lowerCamelCase :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase :Any = self.get_config()
return config, pixel_values, labels
def snake_case ( self : List[Any] ):
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__snake_case , initializer_range=self.initializer_range , )
def snake_case ( self : List[Any] , __snake_case : int , __snake_case : List[Any] , __snake_case : List[Any] ):
lowerCamelCase :str = VideoMAEModel(config=__snake_case )
model.to(__snake_case )
model.eval()
lowerCamelCase :Optional[int] = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self : Tuple , __snake_case : List[Any] , __snake_case : Any , __snake_case : List[Any] ):
lowerCamelCase :Any = VideoMAEForPreTraining(__snake_case )
model.to(__snake_case )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowerCamelCase :int = torch.ones((self.num_masks,) )
lowerCamelCase :Dict = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
lowerCamelCase :Optional[Any] = mask.expand(self.batch_size , -1 ).bool()
lowerCamelCase :int = model(__snake_case , __snake_case )
# model only returns predictions for masked patches
lowerCamelCase :Optional[Any] = mask.sum().item()
lowerCamelCase :Any = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
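# decoder_num_labels is 3 * tubelet_size * patch_size**2 because, for every
# masked position, VideoMAE's decoder reconstructs the raw RGB pixels of an
# entire tube (tubelet_size frames of one patch).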
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Tuple = self.prepare_config_and_inputs()
lowerCamelCase :Optional[Any] = config_and_inputs
lowerCamelCase :Optional[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
_UpperCAmelCase = (
{'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def snake_case ( self : str ):
lowerCamelCase :Union[str, Any] = VideoMAEModelTester(self )
lowerCamelCase :Tuple = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case , hidden_size=37 )
def snake_case ( self : Tuple , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : int=False ):
lowerCamelCase :Optional[Any] = copy.deepcopy(__snake_case )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowerCamelCase :Optional[int] = torch.ones((self.model_tester.num_masks,) )
lowerCamelCase :Tuple = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
lowerCamelCase :Optional[int] = mask.expand(self.model_tester.batch_size , -1 ).bool()
lowerCamelCase :Tuple = bool_masked_pos.to(__snake_case )
if return_labels:
if model_class in [
*get_values(__snake_case ),
]:
lowerCamelCase :str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
return inputs_dict
def snake_case ( self : Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''VideoMAE does not use inputs_embeds''' )
def snake_case ( self : Dict ):
pass
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :List[str] = model_class(__snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase :int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) )
def snake_case ( self : List[str] ):
lowerCamelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :Union[str, Any] = model_class(__snake_case )
lowerCamelCase :Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase :Any = [*signature.parameters.keys()]
lowerCamelCase :Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __snake_case )
def snake_case ( self : Tuple ):
lowerCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def snake_case ( self : List[Any] ):
lowerCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__snake_case )
@slow
def snake_case ( self : Dict ):
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase :Dict = VideoMAEModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def snake_case ( self : str ):
if not self.has_attentions:
pass
else:
lowerCamelCase :int = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase :Any = True
for model_class in self.all_model_classes:
lowerCamelCase :Any = self.model_tester.seq_length - self.model_tester.num_masks
lowerCamelCase :List[str] = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
lowerCamelCase :List[str] = True
lowerCamelCase :Any = False
lowerCamelCase :Optional[Any] = True
lowerCamelCase :Any = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :List[Any] = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Dict = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase :Tuple = True
lowerCamelCase :Dict = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :Optional[int] = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :List[Any] = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowerCamelCase :Any = len(__snake_case )
# Check attention is always last and order is fine
lowerCamelCase :List[str] = True
lowerCamelCase :Optional[Any] = True
lowerCamelCase :Optional[int] = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :Any = model(**self._prepare_for_class(__snake_case , __snake_case ) )
self.assertEqual(out_len + 1 , len(__snake_case ) )
lowerCamelCase :Any = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def snake_case ( self : List[Any] ):
def check_hidden_states_output(__snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Optional[int] ):
lowerCamelCase :List[str] = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :List[Any] = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :List[Any] = outputs.hidden_states
lowerCamelCase :Optional[Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__snake_case ) , __snake_case )
lowerCamelCase :Union[str, Any] = self.model_tester.seq_length - self.model_tester.num_masks
lowerCamelCase :Optional[Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowerCamelCase :Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :Tuple = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase :Dict = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def snake_case ( self : Optional[Any] ):
pass
def _lowerCamelCase ( ):
lowerCamelCase :List[str] = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''')
lowerCamelCase :List[str] = np.load(a_)
return list(a_)
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def snake_case ( self : str ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def snake_case ( self : Optional[Any] ):
lowerCamelCase :List[str] = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to(
__snake_case )
lowerCamelCase :Optional[Any] = self.default_image_processor
lowerCamelCase :int = prepare_video()
lowerCamelCase :Dict = image_processor(__snake_case , return_tensors='''pt''' ).to(__snake_case )
# forward pass
with torch.no_grad():
lowerCamelCase :List[str] = model(**__snake_case )
# verify the logits
lowerCamelCase :Dict = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , __snake_case )
lowerCamelCase :List[str] = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4 ) )
@slow
def snake_case ( self : Dict ):
lowerCamelCase :str = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(__snake_case )
lowerCamelCase :str = self.default_image_processor
lowerCamelCase :Any = prepare_video()
lowerCamelCase :List[Any] = image_processor(__snake_case , return_tensors='''pt''' ).to(__snake_case )
# add boolean mask, indicating which patches to mask
lowerCamelCase :int = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
lowerCamelCase :Optional[Any] = torch.load(__snake_case )
# forward pass
with torch.no_grad():
lowerCamelCase :Union[str, Any] = model(**__snake_case )
# verify the logits
lowerCamelCase :List[Any] = torch.Size([1, 1408, 1536] )
lowerCamelCase :List[str] = torch.tensor(
[[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] , device=__snake_case )
self.assertEqual(outputs.logits.shape , __snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __snake_case , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
lowerCamelCase :str = torch.tensor([0.5_1_4_2] , device=__snake_case )
self.assertTrue(torch.allclose(outputs.loss , __snake_case , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
lowerCamelCase :List[Any] = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' , norm_pix_loss=__snake_case ).to(
__snake_case )
with torch.no_grad():
lowerCamelCase :Optional[int] = model(**__snake_case )
lowerCamelCase :Any = torch.tensor(torch.tensor([0.6_4_6_9] ) , device=__snake_case )
self.assertTrue(torch.allclose(outputs.loss , __snake_case , atol=1e-4 ) )
import math
def _lowerCamelCase ( a_ : int):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(a_) + 1) , 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
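# e.g. is_prime(29): not divisible by 2 or 3, and the 6k +/- 1 loop only has to
# test i = 5 (29 % 5 != 0 and 29 % 7 != 0) before exceeding sqrt(29), so it is prime.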
def _lowerCamelCase ( a_ : float = 0.1):
lowerCamelCase :Dict = 3
lowerCamelCase :List[Any] = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1):
primes += is_prime(a_)
j += 2
return j
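# This is Project Euler problem 58: grow a square spiral by two units of side
# length at a time, count primes among the new corner values (the range above
# steps through them by j + 1, skipping the perfect-square corner at the bound),
# and stop once the prime density along both diagonals (2 * j - 1 numbers)
# drops below `ratio`.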
if __name__ == "__main__":
import doctest
doctest.testmod()
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = (EulerDiscreteScheduler,)
_UpperCAmelCase = 1_0
def snake_case ( self : Dict , **__snake_case : Tuple ):
lowerCamelCase :List[Any] = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
}
config.update(**__snake_case )
return config
def snake_case ( self : Any ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=__snake_case )
def snake_case ( self : List[str] ):
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=__snake_case , beta_end=__snake_case )
def snake_case ( self : Dict ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__snake_case )
def snake_case ( self : int ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__snake_case )
def snake_case ( self : List[Any] ):
lowerCamelCase :int = self.scheduler_classes[0]
lowerCamelCase :List[Any] = self.get_scheduler_config()
lowerCamelCase :List[str] = scheduler_class(**__snake_case )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase :Optional[Any] = torch.manual_seed(0 )
lowerCamelCase :List[str] = self.dummy_model()
lowerCamelCase :List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase :List[str] = sample.to(__snake_case )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase :Optional[int] = scheduler.scale_model_input(__snake_case , __snake_case )
lowerCamelCase :Dict = model(__snake_case , __snake_case )
lowerCamelCase :List[str] = scheduler.step(__snake_case , __snake_case , __snake_case , generator=__snake_case )
lowerCamelCase :Any = output.prev_sample
lowerCamelCase :Optional[int] = torch.sum(torch.abs(__snake_case ) )
lowerCamelCase :Tuple = torch.mean(torch.abs(__snake_case ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0_1_3_1 ) < 1e-3
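# The loop above is the standard sampling pattern: scale the sample for the
# current sigma, predict noise with the model, then step the scheduler; the
# sum/mean checks pin the resulting trajectory to reference values.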
def snake_case ( self : str ):
lowerCamelCase :Optional[Any] = self.scheduler_classes[0]
lowerCamelCase :List[Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCamelCase :Dict = scheduler_class(**__snake_case )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase :Optional[int] = torch.manual_seed(0 )
lowerCamelCase :Union[str, Any] = self.dummy_model()
lowerCamelCase :str = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase :int = sample.to(__snake_case )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase :int = scheduler.scale_model_input(__snake_case , __snake_case )
lowerCamelCase :List[str] = model(__snake_case , __snake_case )
lowerCamelCase :Optional[Any] = scheduler.step(__snake_case , __snake_case , __snake_case , generator=__snake_case )
lowerCamelCase :str = output.prev_sample
lowerCamelCase :List[Any] = torch.sum(torch.abs(__snake_case ) )
lowerCamelCase :str = torch.mean(torch.abs(__snake_case ) )
assert abs(result_sum.item() - 0.0_0_0_2 ) < 1e-2
assert abs(result_mean.item() - 2.2_6_7_6e-0_6 ) < 1e-3
def snake_case ( self : Optional[int] ):
lowerCamelCase :Optional[int] = self.scheduler_classes[0]
lowerCamelCase :Any = self.get_scheduler_config()
lowerCamelCase :List[str] = scheduler_class(**__snake_case )
scheduler.set_timesteps(self.num_inference_steps , device=__snake_case )
lowerCamelCase :Union[str, Any] = torch.manual_seed(0 )
lowerCamelCase :str = self.dummy_model()
lowerCamelCase :int = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
lowerCamelCase :List[Any] = sample.to(__snake_case )
for t in scheduler.timesteps:
lowerCamelCase :Union[str, Any] = scheduler.scale_model_input(__snake_case , __snake_case )
lowerCamelCase :int = model(__snake_case , __snake_case )
lowerCamelCase :Any = scheduler.step(__snake_case , __snake_case , __snake_case , generator=__snake_case )
lowerCamelCase :Tuple = output.prev_sample
lowerCamelCase :List[str] = torch.sum(torch.abs(__snake_case ) )
lowerCamelCase :Tuple = torch.mean(torch.abs(__snake_case ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0_1_3_1 ) < 1e-3
def snake_case ( self : int ):
lowerCamelCase :Union[str, Any] = self.scheduler_classes[0]
lowerCamelCase :Tuple = self.get_scheduler_config()
lowerCamelCase :List[str] = scheduler_class(**__snake_case , use_karras_sigmas=__snake_case )
scheduler.set_timesteps(self.num_inference_steps , device=__snake_case )
lowerCamelCase :int = torch.manual_seed(0 )
lowerCamelCase :List[str] = self.dummy_model()
lowerCamelCase :Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
lowerCamelCase :Optional[int] = sample.to(__snake_case )
for t in scheduler.timesteps:
lowerCamelCase :str = scheduler.scale_model_input(__snake_case , __snake_case )
lowerCamelCase :Optional[int] = model(__snake_case , __snake_case )
lowerCamelCase :int = scheduler.step(__snake_case , __snake_case , __snake_case , generator=__snake_case )
lowerCamelCase :Optional[Any] = output.prev_sample
lowerCamelCase :str = torch.sum(torch.abs(__snake_case ) )
lowerCamelCase :List[str] = torch.mean(torch.abs(__snake_case ) )
assert abs(result_sum.item() - 124.52299499511719 ) < 1e-2
assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3 ) < 1e-3
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : str ):
lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :Optional[Any] = -1
lowerCamelCase :List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :str = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase :str = TextStreamer(__snake_case )
model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase :Optional[int] = cs.out[:-1]
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : Dict ):
lowerCamelCase :Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :int = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :List[Any] = -1
lowerCamelCase :Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Tuple = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :List[Any] = tokenizer.decode(greedy_ids[0] )
lowerCamelCase :List[str] = TextIteratorStreamer(__snake_case )
lowerCamelCase :List[str] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case )
thread.start()
lowerCamelCase :Any = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__snake_case , __snake_case )
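# Note: TextIteratorStreamer is consumed on the main thread while generation
# runs in a worker Thread; iterating the streamer blocks until new tokens
# arrive and stops once generation signals the end of the stream.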
def snake_case ( self : str ):
lowerCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :List[str] = -1
lowerCamelCase :Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :Optional[Any] = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowerCamelCase :List[str] = greedy_ids[:, input_ids.shape[1] :]
lowerCamelCase :Union[str, Any] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase :List[str] = TextStreamer(__snake_case , skip_prompt=__snake_case )
model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase :int = cs.out[:-1]
self.assertEqual(__snake_case , __snake_case )
def snake_case ( self : Optional[int] ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowerCamelCase :List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' )
lowerCamelCase :Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__snake_case )
lowerCamelCase :Optional[int] = -1
lowerCamelCase :Union[str, Any] = torch.ones((1, 5) , device=__snake_case ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowerCamelCase :Dict = TextStreamer(__snake_case , skip_special_tokens=__snake_case )
model.generate(__snake_case , max_new_tokens=1 , do_sample=__snake_case , streamer=__snake_case )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowerCamelCase :Tuple = cs.out[:-1] # Remove the final "\n"
lowerCamelCase :int = tokenizer(__snake_case , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def snake_case ( self : List[Any] ):
lowerCamelCase :List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCamelCase :Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowerCamelCase :Optional[int] = -1
lowerCamelCase :Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowerCamelCase :List[Any] = TextIteratorStreamer(__snake_case , timeout=0.0_0_1 )
lowerCamelCase :Dict = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
lowerCamelCase :Tuple = Thread(target=model.generate , kwargs=__snake_case )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__snake_case ):
lowerCamelCase :Dict = ''''''
for new_text in streamer:
streamer_text += new_text
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def _lowerCamelCase ( a_ : Dict):
lowerCamelCase :Dict = {}
lowerCamelCase :Dict = job['''started_at''']
lowerCamelCase :List[str] = job['''completed_at''']
lowerCamelCase :int = date_parser.parse(a_)
lowerCamelCase :List[Any] = date_parser.parse(a_)
lowerCamelCase :int = round((end_datetime - start_datetime).total_seconds() / 60.0)
lowerCamelCase :Any = start
lowerCamelCase :Optional[Any] = end
lowerCamelCase :List[str] = duration_in_min
return job_info
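# Hedged note: the dict built above ends up shaped like
# {"started_at": ..., "completed_at": ..., "duration": <minutes>}, which the
# __main__ block below sorts on via item[1]["duration"].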
def _lowerCamelCase ( a_ : Optional[Any] , a_ : Optional[int]=None):
lowerCamelCase :List[str] = None
if token is not None:
lowerCamelCase :List[Any] = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F"Bearer {token}"}
lowerCamelCase :Dict = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
lowerCamelCase :int = requests.get(a_ , headers=a_).json()
lowerCamelCase :Any = {}
try:
job_time.update({job['''name''']: extract_time_from_single_job(a_) for job in result['''jobs''']})
lowerCamelCase :int = math.ceil((result['''total_count'''] - 1_00) / 1_00)
for i in range(a_):
lowerCamelCase :List[Any] = requests.get(url + F"&page={i + 2}" , headers=a_).json()
job_time.update({job['''name''']: extract_time_from_single_job(a_) for job in result['''jobs''']})
return job_time
except Exception:
print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}")
return {}
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
A__ = parser.parse_args()
A__ = get_job_time(args.workflow_run_id)
A__ = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F'{k}: {v["duration"]}')
from maths.prime_factors import prime_factors
def _lowerCamelCase ( a_ : int):
if not isinstance(a_ , a_):
lowerCamelCase :Tuple = F"Input value of [number={number}] must be an integer"
raise TypeError(a_)
if number < 1:
raise ValueError('''Input must be a positive integer''')
return -1 if len(prime_factors(a_)) % 2 else 1
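# This is the Liouville lambda function: assuming maths.prime_factors returns
# factors with multiplicity, lambda(12) = -1 (12 = 2 * 2 * 3, three factors)
# and lambda(10) = 1 (10 = 2 * 5, two factors).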
if __name__ == "__main__":
import doctest
doctest.testmod()
def _lowerCamelCase ( a_ : Optional[Any] , a_ : int):
print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''')
for i in range(a_):
for j in range(a_):
if dist[i][j] != float('''inf'''):
print(int(dist[i][j]) , end='''\t''')
else:
print('''INF''' , end='''\t''')
print()
def _lowerCamelCase ( a_ : Optional[Any] , a_ : Optional[int]):
lowerCamelCase :str = [[float('''inf''') for _ in range(a_)] for _ in range(a_)]
for i in range(a_):
for j in range(a_):
lowerCamelCase :Tuple = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(a_):
# looping through rows of graph array
for i in range(a_):
# looping through columns of graph array
for j in range(a_):
if (
dist[i][k] != float('''inf''')
and dist[k][j] != float('''inf''')
and dist[i][k] + dist[k][j] < dist[i][j]
):
lowerCamelCase :List[str] = dist[i][k] + dist[k][j]
_print_dist(a_ , a_)
return dist, v
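# Floyd-Warshall runs in O(v**3): after the outer loop finishes iteration k,
# dist[i][j] holds the shortest i -> j distance using only intermediate
# vertices from {0, ..., k}.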
if __name__ == "__main__":
A__ = int(input("""Enter number of vertices: """))
A__ = int(input("""Enter number of edges: """))
A__ = [[float("""inf""") for i in range(v)] for j in range(v)]
for i in range(v):
A__ = 0.0
# src and dst must be valid vertex indices (0 <= index < v);
# out-of-range input will raise an IndexError
for i in range(e):
print("""\nEdge """, i + 1)
A__ = int(input("""Enter source:"""))
A__ = int(input("""Enter destination:"""))
A__ = float(input("""Enter weight:"""))
A__ = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected output from the vertex, edge and src, dst, weight inputs above
# 0 INF INF
# INF 0 2
# INF 1 0
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
def _lowerCamelCase ( a_ : str , a_ : str=False):
lowerCamelCase :Optional[int] = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token'''))
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings'''))
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''))
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''))
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight'''))
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias'''))
for stage_idx in range(len(config.backbone_config.depths)):
for layer_idx in range(config.backbone_config.depths[stage_idx]):
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight"))
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias"))
# transformer encoder
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight"))
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias"))
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight"))
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias"))
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight"))
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias"))
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight"))
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias"))
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
])
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCamelCase :List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
])
# fmt: on
return rename_keys
def _lowerCamelCase ( a_ : Any , a_ : Any , a_ : int=False):
for i in range(config.num_hidden_layers):
if base_model:
lowerCamelCase :Union[str, Any] = ''''''
else:
lowerCamelCase :Optional[int] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase :Optional[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.weight")
lowerCamelCase :Any = state_dict.pop(F"blocks.{i}.attn.qkv.bias")
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase :Any = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase :Tuple = in_proj_bias[: config.hidden_size]
lowerCamelCase :int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase :Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase :Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase :List[Any] = in_proj_bias[-config.hidden_size :]
def _lowerCamelCase ( a_ : int):
lowerCamelCase :Any = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(a_ , a_)
def _lowerCamelCase ( a_ : int , a_ : Any , a_ : Tuple):
lowerCamelCase :Optional[Any] = dct.pop(a_)
lowerCamelCase :str = val
def _lowerCamelCase ( ):
lowerCamelCase :Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase :Tuple = Image.open(requests.get(a_ , stream=a_).raw)
return im
@torch.no_grad()
def _lowerCamelCase ( a_ : Optional[Any] , a_ : Optional[Any] , a_ : Optional[Any]=False):
lowerCamelCase :Optional[int] = BitConfig(
global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=a_ , )
lowerCamelCase :Optional[int] = ViTHybridConfig(backbone_config=a_ , image_size=3_84 , num_labels=10_00)
lowerCamelCase :List[Any] = False
# load original model from timm
lowerCamelCase :List[str] = timm.create_model(a_ , pretrained=a_)
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCamelCase :List[str] = timm_model.state_dict()
if base_model:
remove_classification_head_(a_)
lowerCamelCase :Tuple = create_rename_keys(a_ , a_)
for src, dest in rename_keys:
rename_key(a_ , a_ , a_)
read_in_q_k_v(a_ , a_ , a_)
lowerCamelCase :List[str] = '''huggingface/label-files'''
lowerCamelCase :Any = '''imagenet-1k-id2label.json'''
lowerCamelCase :List[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''') , '''r'''))
lowerCamelCase :Optional[Any] = {int(a_): v for k, v in idalabel.items()}
lowerCamelCase :Optional[int] = idalabel
lowerCamelCase :Union[str, Any] = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowerCamelCase :Optional[Any] = ViTHybridModel(a_).eval()
else:
lowerCamelCase :Dict = ViTHybridForImageClassification(a_).eval()
model.load_state_dict(a_)
# create image processor
lowerCamelCase :Dict = create_transform(**resolve_data_config({} , model=a_))
lowerCamelCase :str = transform.transforms
lowerCamelCase :int = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
lowerCamelCase :Any = ViTHybridImageProcessor(
do_resize=a_ , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=a_ , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=a_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
lowerCamelCase :Dict = prepare_img()
lowerCamelCase :str = transform(a_).unsqueeze(0)
lowerCamelCase :str = processor(a_ , return_tensors='''pt''').pixel_values
# verify pixel values
assert torch.allclose(a_ , a_)
# verify logits
with torch.no_grad():
lowerCamelCase :Optional[int] = model(a_)
lowerCamelCase :Union[str, Any] = outputs.logits
print('''Predicted class:''' , logits.argmax(-1).item())
if base_model:
lowerCamelCase :Union[str, Any] = timm_model.forward_features(a_)
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(a_ , outputs.pooler_output , atol=1e-3)
else:
lowerCamelCase :List[str] = timm_model(a_)
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(a_ , outputs.logits , atol=1e-3)
print('''Looks ok!''')
if pytorch_dump_folder_path is not None:
Path(a_).mkdir(exist_ok=a_)
print(F"Saving model {vit_name} to {pytorch_dump_folder_path}")
model.save_pretrained(a_)
print(F"Saving processor to {pytorch_dump_folder_path}")
processor.save_pretrained(a_)
if push_to_hub:
print(F"Pushing model and processor to the hub {vit_name}")
model.push_to_hub(F"ybelkada/{vit_name}")
processor.push_to_hub(F"ybelkada/{vit_name}")
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
A__ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
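# Usage sketch (added; the script filename and dump path are hypothetical
# examples, not from the original file):
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base-384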
| 49
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'naver-clova-ix/donut-base-finetuned-docvqa'
_UpperCAmelCase = (
        'This is a tool that answers a question about a document (pdf). It takes an input named `document` which '
'should be the document containing the information, as well as a `question` that is the question about the '
'document. It returns a text that contains the answer to the question.'
)
_UpperCAmelCase = 'document_qa'
_UpperCAmelCase = AutoProcessor
_UpperCAmelCase = VisionEncoderDecoderModel
_UpperCAmelCase = ['image', 'text']
_UpperCAmelCase = ['text']
def __init__( self : Union[str, Any] , *__snake_case : Tuple , **__snake_case : int ):
if not is_vision_available():
raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' )
super().__init__(*__snake_case , **__snake_case )
def snake_case ( self : Dict , __snake_case : "Image" , __snake_case : str ):
lowerCamelCase :str = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
lowerCamelCase :List[str] = task_prompt.replace('''{user_input}''' , __snake_case )
lowerCamelCase :Optional[int] = self.pre_processor.tokenizer(
__snake_case , add_special_tokens=__snake_case , return_tensors='''pt''' ).input_ids
lowerCamelCase :str = self.pre_processor(__snake_case , return_tensors='''pt''' ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def snake_case ( self : Optional[int] , __snake_case : Tuple ):
return self.model.generate(
inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__snake_case , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__snake_case , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__snake_case , ).sequences
def snake_case ( self : Dict , __snake_case : Optional[int] ):
lowerCamelCase :Optional[int] = self.pre_processor.batch_decode(__snake_case )[0]
lowerCamelCase :Optional[Any] = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' )
lowerCamelCase :Optional[Any] = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' )
lowerCamelCase :Union[str, Any] = re.sub(R'''<.*?>''' , '''''' , __snake_case , count=1 ).strip() # remove first task start token
        lowerCamelCase :Tuple = self.pre_processor.token2json(__snake_case )
return sequence["answer"]
| 709
|
def _lowerCamelCase ( a_ : int = 4_00_00_00):
lowerCamelCase :Dict = [0, 1]
lowerCamelCase :Optional[Any] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1])
if fib[i + 2] > n:
break
i += 1
lowerCamelCase :Dict = 0
for j in range(len(a_) - 1):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(F'{solution() = }')
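# Side note (an added sketch, not part of the original solution): every third
# Fibonacci number is even, and the even terms satisfy E(k) = 4*E(k-1) + E(k-2)
# with E(1) = 2 and E(2) = 8, so the sum can be computed without materializing
# the whole sequence.
def _even_fib_sum_sketch(limit: int = 4_00_00_00) -> int:
    a, b = 2, 8  # the first two even Fibonacci numbers
    total = 0
    while a <= limit:
        total += a
        a, b = b, 4 * b + a
    return total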
| 49
| 0
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""",
}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'xlnet'
_UpperCAmelCase = ['mems']
_UpperCAmelCase = {
'n_token': 'vocab_size', # Backward compatibility
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : List[str] , __snake_case : List[str]=32000 , __snake_case : List[Any]=1024 , __snake_case : List[str]=24 , __snake_case : Union[str, Any]=16 , __snake_case : Tuple=4096 , __snake_case : Any="gelu" , __snake_case : Dict=True , __snake_case : Tuple="bi" , __snake_case : Tuple=0.0_2 , __snake_case : Dict=1e-1_2 , __snake_case : Union[str, Any]=0.1 , __snake_case : str=512 , __snake_case : int=None , __snake_case : Optional[int]=True , __snake_case : Optional[int]=False , __snake_case : Optional[Any]=False , __snake_case : Any=-1 , __snake_case : Any=False , __snake_case : Optional[int]="last" , __snake_case : int=True , __snake_case : Any="tanh" , __snake_case : Any=0.1 , __snake_case : int=5 , __snake_case : str=5 , __snake_case : Optional[int]=5 , __snake_case : Dict=1 , __snake_case : Optional[int]=2 , **__snake_case : Union[str, Any] , ):
lowerCamelCase :Optional[int] = vocab_size
lowerCamelCase :Any = d_model
lowerCamelCase :Union[str, Any] = n_layer
lowerCamelCase :Union[str, Any] = n_head
if d_model % n_head != 0:
raise ValueError(F"'d_model % n_head' ({d_model % n_head}) should be equal to 0" )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
F"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})" )
lowerCamelCase :str = d_model // n_head
lowerCamelCase :int = ff_activation
lowerCamelCase :Dict = d_inner
lowerCamelCase :int = untie_r
lowerCamelCase :Optional[int] = attn_type
lowerCamelCase :Optional[Any] = initializer_range
lowerCamelCase :Dict = layer_norm_eps
lowerCamelCase :Union[str, Any] = dropout
lowerCamelCase :Union[str, Any] = mem_len
lowerCamelCase :Optional[int] = reuse_len
lowerCamelCase :List[str] = bi_data
lowerCamelCase :Any = clamp_len
lowerCamelCase :Dict = same_length
lowerCamelCase :Optional[Any] = summary_type
lowerCamelCase :Union[str, Any] = summary_use_proj
lowerCamelCase :Optional[Any] = summary_activation
lowerCamelCase :Optional[Any] = summary_last_dropout
lowerCamelCase :List[str] = start_n_top
lowerCamelCase :int = end_n_top
lowerCamelCase :Any = bos_token_id
lowerCamelCase :Optional[Any] = pad_token_id
lowerCamelCase :Union[str, Any] = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
'''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'''
''' instead.''' , __snake_case , )
lowerCamelCase :Optional[int] = kwargs['''use_cache''']
lowerCamelCase :Optional[int] = use_mems_eval
lowerCamelCase :Optional[int] = use_mems_train
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
@property
    def max_position_embeddings( self : Dict ):
logger.info(F"The model {self.model_type} is one of the few models that has no sequence length limit." )
return -1
@max_position_embeddings.setter
    def max_position_embeddings( self : Optional[Any] , __snake_case : int ):
# Message copied from Transformer-XL documentation
raise NotImplementedError(
F"The model {self.model_type} is one of the few models that has no sequence length limit." )
| 710
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 49
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ = {
"""configuration_table_transformer""": [
"""TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TableTransformerConfig""",
"""TableTransformerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TableTransformerForObjectDetection""",
"""TableTransformerModel""",
"""TableTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 711
|
import numpy
class _lowerCAmelCase :
def __init__( self : Dict , __snake_case : numpy.ndarray , __snake_case : numpy.ndarray ):
lowerCamelCase :Dict = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
lowerCamelCase :Dict = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
lowerCamelCase :Dict = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
lowerCamelCase :Any = numpy.random.rand(3 , 1 )
# Real output values provided.
lowerCamelCase :Union[str, Any] = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
lowerCamelCase :List[str] = numpy.zeros(output_array.shape )
def snake_case ( self : Optional[int] ):
lowerCamelCase :Any = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
lowerCamelCase :Any = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
lowerCamelCase :Dict = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def snake_case ( self : Any ):
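        # Chain rule sketch: every update term multiplies the output error
        # 2 * (output - predicted) by sigmoid_derivative(activation) for each
        # layer crossed, then projects it onto that layer's input activations.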
lowerCamelCase :Union[str, Any] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
lowerCamelCase :Dict = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
lowerCamelCase :int = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def snake_case ( self : Dict , __snake_case : numpy.ndarray , __snake_case : int , __snake_case : bool ):
for iteration in range(1 , iterations + 1 ):
lowerCamelCase :Union[str, Any] = self.feedforward()
self.back_propagation()
if give_loss:
lowerCamelCase :Tuple = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F"Iteration {iteration} Loss: {loss}" )
def snake_case ( self : Optional[int] , __snake_case : numpy.ndarray ):
lowerCamelCase :int = input_arr
lowerCamelCase :Union[str, Any] = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
lowerCamelCase :Optional[Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
lowerCamelCase :Optional[int] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def _lowerCamelCase ( a_ : numpy.ndarray):
return 1 / (1 + numpy.exp(-value))
def _lowerCamelCase ( a_ : numpy.ndarray):
return (value) * (1 - (value))
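# Numerical sanity check (an added sketch, not in the original file): for
# s = sigmoid(x), d/dx sigmoid(x) equals s * (1 - s), which is exactly what
# the derivative helper above computes from the activation value.
def _check_sigmoid_derivative(x: float = 0.5, eps: float = 1e-6) -> bool:
    s = 1 / (1 + numpy.exp(-x))
    finite_diff = (1 / (1 + numpy.exp(-(x + eps))) - s) / eps
    return abs(finite_diff - s * (1 - s)) < 1e-4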
def _lowerCamelCase ( ):
lowerCamelCase :Optional[Any] = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
        ) , dtype=numpy.float64 , )
# True output values for the given input values.
    lowerCamelCase :int = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.float64)
# Calling neural network class.
lowerCamelCase :List[Any] = TwoHiddenLayerNeuralNetwork(
input_array=a_ , output_array=a_)
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=a_ , iterations=10 , give_loss=a_)
    return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.float64))
if __name__ == "__main__":
example()
| 49
| 0
|
def _lowerCamelCase ( a_ : str):
lowerCamelCase :Optional[int] = hex_num.strip()
if not hex_num:
raise ValueError('''No value was passed to the function''')
lowerCamelCase :str = hex_num[0] == '''-'''
if is_negative:
lowerCamelCase :int = hex_num[1:]
try:
lowerCamelCase :int = int(a_ , 16)
except ValueError:
raise ValueError('''Invalid value was passed to the function''')
    if int_num == 0:
        # guard: an input like "0" would otherwise leave bin_str empty and int("") would raise
        return 0
    lowerCamelCase :List[Any] = ''''''
    while int_num > 0:
        lowerCamelCase :Optional[Any] = str(int_num % 2) + bin_str
        int_num >>= 1
    return int(('''-''' + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
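# Reference sketch (added; not part of the original file) of the intended
# mapping via Python's built-in formatting: "AC" -> 10101100 (0xAC == 172)
# and "-1a" -> -11010 (0x1A == 26).
def _hex_to_bin_reference(hex_num: str) -> int:
    value = int(hex_num.strip(), 16)
    return int(format(abs(value), "b")) * (-1 if value < 0 else 1)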
| 712
|
def _lowerCamelCase ( a_ : str , a_ : str):
lowerCamelCase :List[str] = len(a_)
lowerCamelCase :List[str] = len(a_)
lowerCamelCase :int = [[False for _ in range(m + 1)] for _ in range(n + 1)]
lowerCamelCase :Optional[Any] = True
for i in range(a_):
for j in range(m + 1):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
lowerCamelCase :Any = True
if a[i].islower():
lowerCamelCase :List[str] = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
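# Worked example (added for illustration): the DP asks whether `a` can become
# `b` by capitalizing some of its lowercase letters and deleting the remaining
# lowercase ones, e.g. ("daBcd", "ABC") -> True, while ("dBcd", "ABC") -> False
# because no 'a' is available to produce the leading 'A'.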
| 49
| 0
|
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = """▁"""
A__ = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
}
A__ = {
"""vocab_file""": {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"""
),
},
"""spm_file""": {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"""
)
},
}
A__ = {
"""facebook/s2t-small-librispeech-asr""": 1_024,
}
A__ = ["""pt""", """fr""", """ru""", """nl""", """ro""", """it""", """es""", """de"""]
A__ = {"""mustc""": MUSTC_LANGS}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = MAX_MODEL_INPUT_SIZES
_UpperCAmelCase = ['input_ids', 'attention_mask']
_UpperCAmelCase = []
def __init__( self : Tuple , __snake_case : Optional[Any] , __snake_case : str , __snake_case : Tuple="<s>" , __snake_case : Dict="</s>" , __snake_case : List[str]="<pad>" , __snake_case : Optional[Any]="<unk>" , __snake_case : Optional[Any]=False , __snake_case : Tuple=False , __snake_case : int=None , __snake_case : List[str]=None , __snake_case : Optional[Dict[str, Any]] = None , **__snake_case : str , ):
lowerCamelCase :Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , do_upper_case=__snake_case , do_lower_case=__snake_case , tgt_lang=__snake_case , lang_codes=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , )
lowerCamelCase :Optional[int] = do_upper_case
lowerCamelCase :int = do_lower_case
lowerCamelCase :Dict = load_json(__snake_case )
lowerCamelCase :Tuple = {v: k for k, v in self.encoder.items()}
lowerCamelCase :List[Any] = spm_file
lowerCamelCase :Union[str, Any] = load_spm(__snake_case , self.sp_model_kwargs )
if lang_codes is not None:
lowerCamelCase :List[str] = lang_codes
lowerCamelCase :int = LANGUAGES[lang_codes]
lowerCamelCase :Tuple = [F"<lang:{lang}>" for lang in self.langs]
lowerCamelCase :Optional[int] = {lang: self.sp_model.PieceToId(F"<lang:{lang}>" ) for lang in self.langs}
lowerCamelCase :str = self.lang_tokens
lowerCamelCase :List[Any] = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
lowerCamelCase :int = {}
@property
def snake_case ( self : Optional[Any] ):
return len(self.encoder )
@property
def snake_case ( self : int ):
return self._tgt_lang
@tgt_lang.setter
def snake_case ( self : List[str] , __snake_case : Union[str, Any] ):
lowerCamelCase :Dict = new_tgt_lang
self.set_tgt_lang_special_tokens(__snake_case )
def snake_case ( self : Tuple , __snake_case : str ):
lowerCamelCase :Any = self.lang_code_to_id[tgt_lang]
lowerCamelCase :List[str] = [lang_code_id]
def snake_case ( self : Any , __snake_case : str ):
return self.sp_model.encode(__snake_case , out_type=__snake_case )
def snake_case ( self : Tuple , __snake_case : int ):
return self.encoder.get(__snake_case , self.encoder[self.unk_token] )
def snake_case ( self : Dict , __snake_case : int ):
return self.decoder.get(__snake_case , self.unk_token )
def snake_case ( self : Optional[int] , __snake_case : List[str] ):
lowerCamelCase :Any = []
lowerCamelCase :int = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
lowerCamelCase :int = self.sp_model.decode(__snake_case )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
lowerCamelCase :Any = []
else:
current_sub_tokens.append(__snake_case )
lowerCamelCase :int = self.sp_model.decode(__snake_case )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def snake_case ( self : int , __snake_case : Tuple , __snake_case : Any=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def snake_case ( self : Any , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case )
lowerCamelCase :List[str] = [1] * len(self.prefix_tokens )
lowerCamelCase :Union[str, Any] = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(__snake_case )) + suffix_ones
return prefix_ones + ([0] * len(__snake_case )) + ([0] * len(__snake_case )) + suffix_ones
def snake_case ( self : Tuple ):
lowerCamelCase :List[str] = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ):
lowerCamelCase :Any = self.__dict__.copy()
lowerCamelCase :str = None
return state
def __setstate__( self : Any , __snake_case : Dict ):
lowerCamelCase :int = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCamelCase :List[str] = {}
lowerCamelCase :Tuple = load_spm(self.spm_file , self.sp_model_kwargs )
def snake_case ( self : Optional[Any] , __snake_case : str , __snake_case : Optional[str] = None ):
lowerCamelCase :int = Path(__snake_case )
assert save_dir.is_dir(), F"{save_directory} should be a directory"
lowerCamelCase :List[Any] = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
lowerCamelCase :Optional[Any] = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , __snake_case )
if os.path.abspath(self.spm_file ) != os.path.abspath(__snake_case ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __snake_case )
elif not os.path.isfile(self.spm_file ):
with open(__snake_case , '''wb''' ) as fi:
lowerCamelCase :List[str] = self.sp_model.serialized_model_proto()
fi.write(__snake_case )
return (str(__snake_case ), str(__snake_case ))
def _lowerCamelCase ( a_ : str , a_ : Dict[str, Any]):
lowerCamelCase :Tuple = sentencepiece.SentencePieceProcessor(**a_)
spm.Load(str(a_))
return spm
def _lowerCamelCase ( a_ : str):
with open(a_ , '''r''') as f:
return json.load(a_)
def _lowerCamelCase ( a_ : List[str] , a_ : str):
with open(a_ , '''w''') as f:
json.dump(a_ , a_ , indent=2)
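# Usage sketch (added; not part of the original file): the tokenizer is
# normally loaded from a checkpoint that ships both vocab.json and the
# sentencepiece model,
#
#   from transformers import Speech2TextTokenizer
#   tok = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
#   ids = tok("hello world").input_ids  # ends with the eos token id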
| 713
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCAmelCase :
def __init__( self : Any , __snake_case : Optional[int] , __snake_case : int=13 , __snake_case : str=[30, 30] , __snake_case : Tuple=2 , __snake_case : Optional[Any]=3 , __snake_case : int=True , __snake_case : Tuple=True , __snake_case : List[Any]=32 , __snake_case : int=5 , __snake_case : Optional[Any]=4 , __snake_case : Union[str, Any]=37 , __snake_case : str="gelu" , __snake_case : Tuple=0.1 , __snake_case : List[Any]=0.1 , __snake_case : Union[str, Any]=10 , __snake_case : str=0.0_2 , __snake_case : Union[str, Any]=3 , __snake_case : Union[str, Any]=None , __snake_case : List[str]=8 , __snake_case : Any=10 , ):
lowerCamelCase :Optional[Any] = parent
lowerCamelCase :List[Any] = batch_size
lowerCamelCase :Any = image_size
lowerCamelCase :Union[str, Any] = patch_size
lowerCamelCase :Any = num_channels
lowerCamelCase :List[Any] = is_training
lowerCamelCase :Optional[Any] = use_labels
lowerCamelCase :Any = hidden_size
lowerCamelCase :List[Any] = num_hidden_layers
lowerCamelCase :List[str] = num_attention_heads
lowerCamelCase :Tuple = intermediate_size
lowerCamelCase :List[str] = hidden_act
lowerCamelCase :List[str] = hidden_dropout_prob
lowerCamelCase :Any = attention_probs_dropout_prob
lowerCamelCase :List[Any] = type_sequence_label_size
lowerCamelCase :Optional[int] = initializer_range
lowerCamelCase :List[Any] = num_labels
lowerCamelCase :Any = scope
lowerCamelCase :Union[str, Any] = n_targets
lowerCamelCase :Optional[Any] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
lowerCamelCase :Tuple = (image_size[1] // patch_size) * (image_size[0] // patch_size)
lowerCamelCase :str = num_patches + 1 + self.num_detection_tokens
def snake_case ( self : List[str] ):
lowerCamelCase :str = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowerCamelCase :List[str] = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowerCamelCase :Optional[int] = []
for i in range(self.batch_size ):
lowerCamelCase :List[str] = {}
lowerCamelCase :Tuple = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=__snake_case )
lowerCamelCase :List[str] = torch.rand(self.n_targets , 4 , device=__snake_case )
labels.append(__snake_case )
lowerCamelCase :str = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Union[str, Any] ):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__snake_case , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def snake_case ( self : Tuple , __snake_case : Tuple , __snake_case : Tuple , __snake_case : Any ):
lowerCamelCase :Optional[Any] = YolosModel(config=__snake_case )
model.to(__snake_case )
model.eval()
lowerCamelCase :Union[str, Any] = model(__snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def snake_case ( self : Dict , __snake_case : str , __snake_case : Optional[int] , __snake_case : Optional[Any] ):
lowerCamelCase :int = YolosForObjectDetection(__snake_case )
model.to(__snake_case )
model.eval()
lowerCamelCase :str = model(pixel_values=__snake_case )
lowerCamelCase :Any = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
lowerCamelCase :int = model(pixel_values=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def snake_case ( self : int ):
lowerCamelCase :List[Any] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase :str = config_and_inputs
lowerCamelCase :Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
_UpperCAmelCase = (
{'feature-extraction': YolosModel, 'object-detection': YolosForObjectDetection} if is_torch_available() else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def snake_case ( self : Any , __snake_case : List[Any] , __snake_case : Tuple , __snake_case : Dict=False ):
lowerCamelCase :Optional[int] = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
lowerCamelCase :Dict = []
for i in range(self.model_tester.batch_size ):
lowerCamelCase :Optional[Any] = {}
lowerCamelCase :List[Any] = torch.ones(
size=(self.model_tester.n_targets,) , device=__snake_case , dtype=torch.long )
lowerCamelCase :str = torch.ones(
self.model_tester.n_targets , 4 , device=__snake_case , dtype=torch.float )
labels.append(__snake_case )
lowerCamelCase :Union[str, Any] = labels
return inputs_dict
def snake_case ( self : Tuple ):
lowerCamelCase :Union[str, Any] = YolosModelTester(self )
lowerCamelCase :Dict = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case , hidden_size=37 )
def snake_case ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def snake_case ( self : Optional[Any] ):
# YOLOS does not use inputs_embeds
pass
def snake_case ( self : Tuple ):
lowerCamelCase , lowerCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :Optional[int] = model_class(__snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase :str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) )
def snake_case ( self : str ):
lowerCamelCase , lowerCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :str = model_class(__snake_case )
lowerCamelCase :Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase :Tuple = [*signature.parameters.keys()]
lowerCamelCase :Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __snake_case )
def snake_case ( self : int ):
lowerCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def snake_case ( self : str ):
lowerCamelCase , lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase :int = True
# in YOLOS, the seq_len is different
lowerCamelCase :str = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowerCamelCase :str = True
lowerCamelCase :Tuple = False
lowerCamelCase :Optional[int] = True
lowerCamelCase :int = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :str = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :str = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase :Optional[Any] = True
lowerCamelCase :str = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :Tuple = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Tuple = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowerCamelCase :Optional[int] = len(__snake_case )
# Check attention is always last and order is fine
lowerCamelCase :Union[str, Any] = True
lowerCamelCase :List[Any] = True
lowerCamelCase :Tuple = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :int = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Dict = 1
self.assertEqual(out_len + added_hidden_states , len(__snake_case ) )
lowerCamelCase :Dict = outputs.attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def snake_case ( self : List[str] ):
def check_hidden_states_output(__snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Tuple ):
lowerCamelCase :Union[str, Any] = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
lowerCamelCase :Any = model(**self._prepare_for_class(__snake_case , __snake_case ) )
lowerCamelCase :Optional[Any] = outputs.hidden_states
lowerCamelCase :Any = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__snake_case ) , __snake_case )
# YOLOS has a different seq_length
lowerCamelCase :List[str] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowerCamelCase , lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase :Union[str, Any] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase :Any = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*__snake_case )
@slow
def snake_case ( self : Dict ):
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase :Tuple = YolosModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def _lowerCamelCase ( ):
lowerCamelCase :int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def snake_case ( self : Tuple ):
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def snake_case ( self : Dict ):
lowerCamelCase :Union[str, Any] = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(__snake_case )
lowerCamelCase :Optional[Any] = self.default_image_processor
lowerCamelCase :str = prepare_img()
lowerCamelCase :Dict = image_processor(images=__snake_case , return_tensors='''pt''' ).to(__snake_case )
# forward pass
with torch.no_grad():
lowerCamelCase :Optional[Any] = model(inputs.pixel_values )
# verify outputs
lowerCamelCase :int = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , __snake_case )
lowerCamelCase :Any = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=__snake_case , )
lowerCamelCase :Any = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __snake_case , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , __snake_case , atol=1e-4 ) )
# verify postprocessing
lowerCamelCase :List[str] = image_processor.post_process_object_detection(
__snake_case , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
lowerCamelCase :List[str] = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(__snake_case )
lowerCamelCase :str = [75, 75, 17, 63, 17]
lowerCamelCase :Tuple = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(__snake_case )
self.assertEqual(len(results['''scores'''] ) , 5 )
self.assertTrue(torch.allclose(results['''scores'''] , __snake_case , atol=1e-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist() , __snake_case )
self.assertTrue(torch.allclose(results['''boxes'''][0, :] , __snake_case ) )
| 49
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ = {"""configuration_sew""": ["""SEW_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SEWConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""SEW_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SEWForCTC""",
"""SEWForSequenceClassification""",
"""SEWModel""",
"""SEWPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 714
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class _lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self : Tuple ):
lowerCamelCase :List[Any] = inspect.getfile(accelerate.test_utils )
lowerCamelCase :Dict = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
lowerCamelCase :Any = test_metrics
@require_cpu
def snake_case ( self : Dict ):
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def snake_case ( self : int ):
debug_launcher(self.test_metrics.main )
@require_single_gpu
def snake_case ( self : Any ):
self.test_metrics.main()
@require_multi_gpu
def snake_case ( self : Optional[int] ):
print(F"Found {torch.cuda.device_count()} devices." )
lowerCamelCase :Optional[int] = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__snake_case , env=os.environ.copy() )
| 49
| 0
|
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def _lowerCamelCase ( a_ : List[str]):
lowerCamelCase :List[str] = args.pruning_method
lowerCamelCase :Tuple = args.threshold
lowerCamelCase :List[str] = args.model_name_or_path.rstrip('''/''')
lowerCamelCase :Optional[int] = args.target_model_path
print(F"Load fine-pruned model from {model_name_or_path}")
lowerCamelCase :Optional[int] = torch.load(os.path.join(a_ , '''pytorch_model.bin'''))
lowerCamelCase :List[str] = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
lowerCamelCase :int = tensor
print(F"Copied layer {name}")
elif "classifier" in name or "qa_output" in name:
lowerCamelCase :int = tensor
print(F"Copied layer {name}")
elif "bias" in name:
lowerCamelCase :Optional[int] = tensor
print(F"Copied layer {name}")
else:
if pruning_method == "magnitude":
lowerCamelCase :List[Any] = MagnitudeBinarizer.apply(inputs=a_ , threshold=a_)
lowerCamelCase :Optional[Any] = tensor * mask
print(F"Pruned layer {name}")
elif pruning_method == "topK":
if "mask_scores" in name:
continue
lowerCamelCase :Any = name[:-6]
lowerCamelCase :int = model[F"{prefix_}mask_scores"]
lowerCamelCase :List[str] = TopKBinarizer.apply(a_ , a_)
lowerCamelCase :Optional[int] = tensor * mask
print(F"Pruned layer {name}")
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
lowerCamelCase :List[Any] = name[:-6]
lowerCamelCase :Optional[Any] = model[F"{prefix_}mask_scores"]
lowerCamelCase :Any = ThresholdBinarizer.apply(a_ , a_ , a_)
lowerCamelCase :Optional[int] = tensor * mask
print(F"Pruned layer {name}")
elif pruning_method == "l0":
if "mask_scores" in name:
continue
lowerCamelCase :List[str] = name[:-6]
lowerCamelCase :Optional[Any] = model[F"{prefix_}mask_scores"]
                lowerCamelCase , lowerCamelCase :Optional[Any] = -0.1, 1.1
lowerCamelCase :int = torch.sigmoid(a_)
lowerCamelCase :Any = s * (r - l) + l
lowerCamelCase :List[str] = s_bar.clamp(min=0.0 , max=1.0)
lowerCamelCase :Optional[Any] = tensor * mask
print(F"Pruned layer {name}")
else:
raise ValueError('''Unknown pruning method''')
if target_model_path is None:
lowerCamelCase :int = os.path.join(
os.path.dirname(a_) , F"bertarized_{os.path.basename(a_)}")
if not os.path.isdir(a_):
shutil.copytree(a_ , a_)
print(F"\nCreated folder {target_model_path}")
torch.save(a_ , os.path.join(a_ , '''pytorch_model.bin'''))
print('''\nPruned model saved! See you later!''')
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
A__ = parser.parse_args()
main(args)
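# Usage sketch (added; the script name and model folder are hypothetical
# examples):
#   python bertarize.py \
#       --pruning_method sigmoied_threshold \
#       --threshold 0.1 \
#       --model_name_or_path ./serialization_dir/fine_pruned_model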
| 715
|
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = ''
_UpperCAmelCase = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
_UpperCAmelCase = None # compression type in fsspec. ex: "gzip"
_UpperCAmelCase = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self : str , __snake_case : str = "" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , **__snake_case : Dict ):
super().__init__(self , **__snake_case )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
lowerCamelCase :Optional[Any] = fsspec.open(
__snake_case , mode='''rb''' , protocol=__snake_case , compression=self.compression , client_kwargs={
'''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459
'''trust_env''': True, # Enable reading proxy env variables.
**(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
lowerCamelCase :List[str] = os.path.basename(self.file.path.split('''::''' )[0] )
lowerCamelCase :Dict = (
self.compressed_name[: self.compressed_name.rindex('''.''' )]
if '''.''' in self.compressed_name
else self.compressed_name
)
lowerCamelCase :List[str] = None
@classmethod
def snake_case ( cls : Any , __snake_case : Any ):
# compressed file paths are always relative to the archive root
return super()._strip_protocol(__snake_case ).lstrip('''/''' )
def snake_case ( self : Any ):
if self.dir_cache is None:
lowerCamelCase :Optional[Any] = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
lowerCamelCase :Optional[Any] = {f['''name''']: f}
def snake_case ( self : Union[str, Any] , __snake_case : str ):
return self.file.open().read()
def snake_case ( self : Optional[int] , __snake_case : str , __snake_case : str = "rb" , __snake_case : int=None , __snake_case : Optional[int]=True , __snake_case : str=None , **__snake_case : str , ):
lowerCamelCase :List[str] = self._strip_protocol(__snake_case )
if mode != "rb":
raise ValueError(F"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'" )
return self.file.open()
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'bz2'
_UpperCAmelCase = 'bz2'
_UpperCAmelCase = '.bz2'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'gzip'
_UpperCAmelCase = 'gzip'
_UpperCAmelCase = '.gz'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'lz4'
_UpperCAmelCase = 'lz4'
_UpperCAmelCase = '.lz4'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'xz'
_UpperCAmelCase = 'xz'
_UpperCAmelCase = '.xz'
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'zstd'
_UpperCAmelCase = 'zstd'
_UpperCAmelCase = '.zst'
def __init__( self : str , __snake_case : str , __snake_case : str = "rb" , __snake_case : Optional[str] = None , __snake_case : Optional[dict] = None , __snake_case : int = DEFAULT_BLOCK_SIZE , **__snake_case : int , ):
super().__init__(
fo=__snake_case , mode=__snake_case , target_protocol=__snake_case , target_options=__snake_case , block_size=__snake_case , **__snake_case , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
lowerCamelCase :Tuple = self.file.__enter__
class _lowerCAmelCase :
def __init__( self : Dict , __snake_case : Tuple ):
lowerCamelCase :Optional[int] = file_
def __enter__( self : Optional[int] ):
self._file.__enter__()
return self
def __exit__( self : str , *__snake_case : Optional[Any] , **__snake_case : List[Any] ):
self._file.__exit__(*__snake_case , **__snake_case )
def __iter__( self : Optional[Any] ):
return iter(self._file )
def snake_case ( self : List[Any] ):
return next(self._file )
def __getattr__( self : Any , __snake_case : str ):
return getattr(self._file , __snake_case )
def fixed_enter(*__snake_case : Optional[int] , **__snake_case : str ):
return WrappedFile(_enter(*__snake_case , **__snake_case ) )
lowerCamelCase :Dict = fixed_enter
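# Usage sketch (added), mirroring the protocol comment near the top of the
# base class: a chained fsspec URL opens the compressed file transparently,
#
#   import fsspec
#   with fsspec.open("gzip://file.txt::http://foo.bar/file.txt.gz", "rb") as f:
#       data = f.read()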
| 49
| 0
|
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
A__ = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""]
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self : List[Any] , __snake_case : Optional[int] , __snake_case : List[Any] , __snake_case : Any=None , __snake_case : int=1 ):
lowerCamelCase :Optional[Any] = tokenizer
lowerCamelCase :Dict = dataset
lowerCamelCase :Tuple = len(__snake_case ) if n_tasks is None else n_tasks
lowerCamelCase :List[str] = n_copies
def __iter__( self : Tuple ):
lowerCamelCase :Dict = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() )
lowerCamelCase :List[str] = self.tokenizer(__snake_case , padding=__snake_case , return_tensors='''pt''' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self : Optional[int] , __snake_case : Union[str, Any] , __snake_case : str , __snake_case : int ):
lowerCamelCase :Dict = start_length
lowerCamelCase :Any = eof_strings
lowerCamelCase :int = tokenizer
def __call__( self : Any , __snake_case : str , __snake_case : Dict , **__snake_case : List[str] ):
lowerCamelCase :Union[str, Any] = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCamelCase :Optional[Any] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(__snake_case )
def _lowerCamelCase ( a_ : Tuple):
lowerCamelCase :Tuple = re.split('''(%s)''' % '''|'''.join(a_) , a_)
# last string should be ""
return "".join(string_list[:-2])
def _lowerCamelCase ( a_ : List[str] , a_ : str , a_ : Optional[int] , a_ : str , a_ : Optional[int] , a_ : Tuple=20 , **a_ : int):
lowerCamelCase :Dict = defaultdict(a_) # dict of list of generated tokens
for step, batch in tqdm(enumerate(a_)):
with torch.no_grad():
lowerCamelCase :Optional[int] = batch['''ids'''].shape[-1]
lowerCamelCase :Union[str, Any] = accelerator.unwrap_model(a_).generate(
input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=a_ , **a_)
# each task is generated batch_size times
lowerCamelCase :Any = batch['''task_id'''].repeat(a_)
lowerCamelCase :Tuple = accelerator.pad_across_processes(
a_ , dim=1 , pad_index=tokenizer.pad_token_id)
lowerCamelCase :List[str] = accelerator.gather((generated_tokens, generated_tasks))
lowerCamelCase :Tuple = generated_tokens.cpu().numpy()
lowerCamelCase :Tuple = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(a_ , a_):
gen_token_dict[task].append(a_)
lowerCamelCase :Optional[Any] = [[] for _ in range(a_)]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowerCamelCase :List[str] = tokenizer.decode(a_ , skip_special_tokens=a_ , clean_up_tokenization_spaces=a_)
code_gens[task].append(remove_last_block(a_))
return code_gens
def _lowerCamelCase ( ):
# Setup configuration
lowerCamelCase :int = HfArgumentParser(a_)
lowerCamelCase :Union[str, Any] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowerCamelCase :Optional[int] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowerCamelCase :str = '''false'''
if args.num_workers is None:
lowerCamelCase :Tuple = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowerCamelCase :Union[str, Any] = Accelerator()
set_seed(args.seed , device_specific=a_)
# Load model and tokenizer
lowerCamelCase :Union[str, Any] = AutoTokenizer.from_pretrained(args.model_ckpt)
lowerCamelCase :str = tokenizer.eos_token
lowerCamelCase :str = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
# Generation settings
lowerCamelCase :Any = {
'''do_sample''': args.do_sample,
'''temperature''': args.temperature,
'''max_new_tokens''': args.max_new_tokens,
'''top_p''': args.top_p,
'''top_k''': args.top_k,
'''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , a_ , a_)]),
}
# Load evaluation dataset and metric
lowerCamelCase :int = load_dataset('''openai_humaneval''')
lowerCamelCase :Dict = load_metric('''code_eval''')
lowerCamelCase :Optional[Any] = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''])
lowerCamelCase :Union[str, Any] = args.n_samples // args.batch_size
lowerCamelCase :Union[str, Any] = TokenizedDataset(a_ , human_eval['''test'''] , n_copies=a_ , n_tasks=a_)
    # do not confuse this with args.batch_size, which is actually num_return_sequences
lowerCamelCase :Dict = DataLoader(a_ , batch_size=1)
# Run a quick test to see if code evaluation is enabled
try:
lowerCamelCase :Optional[int] = code_eval_metric.compute(references=[''''''] , predictions=[['''''']])
except ValueError as exception:
print(
'''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
''' flag to enable code evaluation.''')
raise exception
lowerCamelCase :Union[str, Any] = accelerator.prepare(a_ , a_)
lowerCamelCase :int = complete_code(
a_ , a_ , a_ , a_ , n_tasks=a_ , batch_size=args.batch_size , **a_ , )
if accelerator.is_main_process:
lowerCamelCase :Dict = []
for task in tqdm(range(a_)):
lowerCamelCase :Tuple = human_eval['''test'''][task]['''test''']
lowerCamelCase :int = F"check({human_eval['test'][task]['entry_point']})"
references.append('''\n''' + test_func + '''\n''' + entry_point)
# Evaluate completions with "code_eval" metric
lowerCamelCase :Any = code_eval_metric.compute(
references=a_ , predictions=a_ , num_workers=args.num_workers)
print(F"Results: {pass_at_k}")
# Save results to json file
with open(args.output_file , '''w''') as fp:
json.dump(a_ , a_)
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
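# Example invocation sketch (hypothetical script name; every flag below is
# inferred from the `args.*` attributes read above, not from a documented CLI):
#
#   python humaneval_generation.py \
#       --model_ckpt codeparrot/codeparrot \
#       --n_samples 200 --batch_size 10 \
#       --HF_ALLOW_CODE_EVAL 1 \
#       --output_file eval_results.json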
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = LEDTokenizer
_UpperCAmelCase = LEDTokenizerFast
_UpperCAmelCase = True
def snake_case ( self : Any ):
super().setUp()
lowerCamelCase :Optional[int] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowerCamelCase :Any = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
lowerCamelCase :List[str] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCamelCase :int = {'''unk_token''': '''<unk>'''}
lowerCamelCase :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__snake_case ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__snake_case ) )
def snake_case ( self : int , **__snake_case : int ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Dict , **__snake_case : Any ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def snake_case ( self : Optional[Any] , __snake_case : Union[str, Any] ):
return "lower newer", "lower newer"
@cached_property
def snake_case ( self : Any ):
return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )
@cached_property
def snake_case ( self : int ):
return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )
@require_torch
def snake_case ( self : str ):
lowerCamelCase :Any = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
lowerCamelCase :List[Any] = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[Any] = tokenizer(__snake_case , max_length=len(__snake_case ) , padding=__snake_case , return_tensors='''pt''' )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
lowerCamelCase :List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(__snake_case , __snake_case )
@require_torch
def snake_case ( self : Tuple ):
lowerCamelCase :Dict = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Union[str, Any] = tokenizer(__snake_case , padding=__snake_case , return_tensors='''pt''' )
self.assertIn('''input_ids''' , __snake_case )
self.assertIn('''attention_mask''' , __snake_case )
self.assertNotIn('''labels''' , __snake_case )
self.assertNotIn('''decoder_attention_mask''' , __snake_case )
@require_torch
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Union[str, Any] = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :List[Any] = tokenizer(text_target=__snake_case , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def snake_case ( self : List[Any] ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[Any] = tokenizer(
['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=__snake_case , truncation=__snake_case , return_tensors='''pt''' )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
def snake_case ( self : Optional[int] ):
lowerCamelCase :Union[str, Any] = ['''A long paragraph for summarization.''']
lowerCamelCase :Any = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Union[str, Any] = tokenizer(__snake_case , return_tensors='''pt''' )
lowerCamelCase :Any = tokenizer(text_target=__snake_case , return_tensors='''pt''' )
lowerCamelCase :Optional[int] = inputs['''input_ids''']
lowerCamelCase :Any = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def snake_case ( self : Dict ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCamelCase :Optional[int] = ['''Summary of the text.''', '''Another summary.''']
lowerCamelCase :List[Any] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
lowerCamelCase :Optional[int] = tokenizer(__snake_case , padding=__snake_case )
lowerCamelCase :Union[str, Any] = [[0] * len(__snake_case ) for x in encoded_output['''input_ids''']]
lowerCamelCase :str = tokenizer.pad(__snake_case )
self.assertSequenceEqual(outputs['''global_attention_mask'''] , __snake_case )
def snake_case ( self : Tuple ):
pass
def snake_case ( self : Optional[int] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase :Optional[Any] = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :Tuple = self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :int = '''A, <mask> AllenNLP sentence.'''
lowerCamelCase :str = tokenizer_r.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case )
lowerCamelCase :str = tokenizer_p.encode_plus(__snake_case , add_special_tokens=__snake_case , return_token_type_ids=__snake_case )
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
lowerCamelCase :Optional[int] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
lowerCamelCase :Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
__snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase = MobileBertTokenizer
_UpperCAmelCase = MobileBertTokenizerFast
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = filter_non_english
_UpperCAmelCase = 'google/mobilebert-uncased'
def snake_case ( self : int ):
super().setUp()
lowerCamelCase :Tuple = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCamelCase :Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowerCamelCase :List[str] = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def snake_case ( self : str , __snake_case : List[Any] ):
lowerCamelCase :Dict = '''UNwant\u00E9d,running'''
lowerCamelCase :Any = '''unwanted, running'''
return input_text, output_text
def snake_case ( self : List[str] ):
lowerCamelCase :int = self.tokenizer_class(self.vocab_file )
lowerCamelCase :Any = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(__snake_case , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , [9, 6, 7, 12, 10, 11] )
def snake_case ( self : str ):
if not self.test_rust_tokenizer:
return
lowerCamelCase :List[Any] = self.get_tokenizer()
lowerCamelCase :Union[str, Any] = self.get_rust_tokenizer()
lowerCamelCase :Union[str, Any] = '''UNwant\u00E9d,running'''
lowerCamelCase :List[Any] = tokenizer.tokenize(__snake_case )
lowerCamelCase :str = rust_tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
lowerCamelCase :str = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
lowerCamelCase :Any = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
lowerCamelCase :int = self.get_rust_tokenizer()
lowerCamelCase :List[Any] = tokenizer.encode(__snake_case )
lowerCamelCase :Optional[int] = rust_tokenizer.encode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
# With lower casing
lowerCamelCase :Tuple = self.get_tokenizer(do_lower_case=__snake_case )
lowerCamelCase :str = self.get_rust_tokenizer(do_lower_case=__snake_case )
lowerCamelCase :Optional[Any] = '''UNwant\u00E9d,running'''
lowerCamelCase :int = tokenizer.tokenize(__snake_case )
lowerCamelCase :Optional[Any] = rust_tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
lowerCamelCase :Optional[int] = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
lowerCamelCase :Tuple = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
lowerCamelCase :Dict = self.get_rust_tokenizer()
lowerCamelCase :Tuple = tokenizer.encode(__snake_case )
lowerCamelCase :Optional[Any] = rust_tokenizer.encode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
def snake_case ( self : Optional[int] ):
lowerCamelCase :Optional[int] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def snake_case ( self : Dict ):
lowerCamelCase :Optional[int] = BasicTokenizer(do_lower_case=__snake_case )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def snake_case ( self : List[str] ):
lowerCamelCase :Optional[Any] = BasicTokenizer(do_lower_case=__snake_case , strip_accents=__snake_case )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def snake_case ( self : str ):
lowerCamelCase :Optional[int] = BasicTokenizer(do_lower_case=__snake_case , strip_accents=__snake_case )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def snake_case ( self : str ):
lowerCamelCase :Tuple = BasicTokenizer(do_lower_case=__snake_case )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def snake_case ( self : int ):
lowerCamelCase :Union[str, Any] = BasicTokenizer(do_lower_case=__snake_case )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :List[Any] = BasicTokenizer(do_lower_case=__snake_case , strip_accents=__snake_case )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def snake_case ( self : List[Any] ):
lowerCamelCase :Optional[Any] = BasicTokenizer(do_lower_case=__snake_case , strip_accents=__snake_case )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def snake_case ( self : Dict ):
lowerCamelCase :Tuple = BasicTokenizer(do_lower_case=__snake_case , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Any = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
lowerCamelCase :Optional[Any] = {}
for i, token in enumerate(__snake_case ):
lowerCamelCase :List[Any] = i
lowerCamelCase :int = WordpieceTokenizer(vocab=__snake_case , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def snake_case ( self : str ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def snake_case ( self : Optional[int] ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def snake_case ( self : List[Any] ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Optional[Any] = self.get_tokenizer()
lowerCamelCase :str = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def snake_case ( self : int ):
lowerCamelCase :str = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' )
lowerCamelCase :Dict = tokenizer.encode('''sequence builders''' , add_special_tokens=__snake_case )
lowerCamelCase :List[Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__snake_case )
lowerCamelCase :Any = tokenizer.build_inputs_with_special_tokens(__snake_case )
lowerCamelCase :Tuple = tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def snake_case ( self : Dict ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase :int = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :Union[str, Any] = F"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
lowerCamelCase :Optional[int] = tokenizer_r.encode_plus(
__snake_case , return_attention_mask=__snake_case , return_token_type_ids=__snake_case , return_offsets_mapping=__snake_case , add_special_tokens=__snake_case , )
lowerCamelCase :Tuple = tokenizer_r.do_lower_case if hasattr(__snake_case , '''do_lower_case''' ) else False
lowerCamelCase :Union[str, Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def snake_case ( self : Any ):
lowerCamelCase :Optional[int] = ['''的''', '''人''', '''有''']
lowerCamelCase :List[Any] = ''''''.join(__snake_case )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase :Optional[Any] = True
lowerCamelCase :List[str] = self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :List[str] = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :Union[str, Any] = tokenizer_p.encode(__snake_case , add_special_tokens=__snake_case )
lowerCamelCase :Union[str, Any] = tokenizer_r.encode(__snake_case , add_special_tokens=__snake_case )
lowerCamelCase :List[str] = tokenizer_r.convert_ids_to_tokens(__snake_case )
lowerCamelCase :Tuple = tokenizer_p.convert_ids_to_tokens(__snake_case )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__snake_case , __snake_case )
self.assertListEqual(__snake_case , __snake_case )
lowerCamelCase :Optional[Any] = False
lowerCamelCase :Tuple = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :Tuple = self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
lowerCamelCase :str = tokenizer_r.encode(__snake_case , add_special_tokens=__snake_case )
lowerCamelCase :Union[str, Any] = tokenizer_p.encode(__snake_case , add_special_tokens=__snake_case )
lowerCamelCase :Dict = tokenizer_r.convert_ids_to_tokens(__snake_case )
lowerCamelCase :Any = tokenizer_p.convert_ids_to_tokens(__snake_case )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCamelCase :List[str] = [
F"##{token}" if idx != 0 else token for idx, token in enumerate(__snake_case )
]
self.assertListEqual(__snake_case , __snake_case )
self.assertListEqual(__snake_case , __snake_case )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A__ = {
"""configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
"""processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
"""tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""LayoutLMv2TokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""LayoutLMv2FeatureExtractor"""]
A__ = ["""LayoutLMv2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv2ForQuestionAnswering""",
"""LayoutLMv2ForSequenceClassification""",
"""LayoutLMv2ForTokenClassification""",
"""LayoutLMv2Layer""",
"""LayoutLMv2Model""",
"""LayoutLMv2PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
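# Usage sketch (hypothetical consumer code): with _LazyModule, nothing heavy is
# imported at package load time; a submodule is resolved on first attribute
# access, e.g.
#
#   from transformers.models.layoutlmv2 import LayoutLMv2Config  # triggers the lazy import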
from math import ceil
def _lowerCamelCase ( a_ : int = 10_01):
lowerCamelCase :Union[str, Any] = 1
for i in range(1 , int(ceil(n / 2.0))):
lowerCamelCase :Any = 2 * i + 1
lowerCamelCase :str = 2 * i
lowerCamelCase :Tuple = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
A__ = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _lowerCAmelCase :
@staticmethod
def snake_case ( *__snake_case : str , **__snake_case : str ):
pass
@is_pipeline_test
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@require_torch
def snake_case ( self : Union[str, Any] ):
lowerCamelCase :Optional[int] = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] )
        # The floating-point scores are so close that rounding error makes the
        # order not guaranteed across Python and torch versions.
self.assertIn(
nested_simplify(__snake_case ) , [
[{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}],
[{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}],
] , )
lowerCamelCase :Tuple = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
] , )
@require_tf
def snake_case ( self : Tuple ):
lowerCamelCase :Tuple = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [{'''score''': 0.3_3_3, '''label''': '''a'''}, {'''score''': 0.3_3_3, '''label''': '''b'''}, {'''score''': 0.3_3_3, '''label''': '''c'''}] , )
lowerCamelCase :int = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
[
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
{'''score''': 0.3_3_3, '''label''': ANY(__snake_case )},
],
] , )
@slow
@require_torch
def snake_case ( self : Any ):
lowerCamelCase :str = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
lowerCamelCase :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Optional[Any] = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
] , )
lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def snake_case ( self : Optional[Any] ):
lowerCamelCase :Union[str, Any] = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
lowerCamelCase :Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCamelCase :Dict = image_classifier(__snake_case , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(__snake_case ) , [
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
] , )
lowerCamelCase :Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{'''score''': 0.5_1_1, '''label''': '''remote'''},
{'''score''': 0.4_8_5, '''label''': '''cat'''},
{'''score''': 0.0_0_4, '''label''': '''plane'''},
],
]
* 5 , )
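# A minimal standalone sketch of the pipeline exercised above (assumes hub
# access; task name and checkpoint are taken from the slow tests):
#
#   from transformers import pipeline
#
#   classifier = pipeline(
#       task="zero-shot-image-classification", model="openai/clip-vit-base-patch32"
#   )
#   # hypothetical local image path
#   scores = classifier("cats.png", candidate_labels=["cat", "plane", "remote"])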
from __future__ import annotations
def _lowerCamelCase ( a_ : int | float | str , a_ : int | float | str):
if nth_term == "":
return [""]
lowerCamelCase :List[str] = int(a_)
lowerCamelCase :List[Any] = int(a_)
lowerCamelCase :list[str] = []
for temp in range(int(a_)):
series.append(F"1 / {pow(temp + 1 , int(a_))}" if series else '''1''')
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
A__ = int(input("""Enter the last number (nth term) of the P-Series"""))
A__ = int(input("""Enter the power for P-Series"""))
print("""Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p""")
print(p_series(nth_term, power))
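# Worked example of the intended series: with nth_term=5 and power=2 the
# function should yield ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25'], i.e. the
# terms of 1 + 1/2^2 + 1/3^2 + ... + 1/5^2.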
import operator as op
def _lowerCamelCase ( a_ : Tuple):
lowerCamelCase :int = []
    lowerCamelCase :List[str] = lambda x , y: int(x / y)  # noqa: E731 integer division operation
lowerCamelCase :Optional[int] = {
'''^''': op.pow,
'''*''': op.mul,
'''/''': div,
'''+''': op.add,
'''-''': op.sub,
} # operators & their respective operation
# print table header
print('''Symbol'''.center(8) , '''Action'''.center(12) , '''Stack''' , sep=''' | ''')
print('''-''' * (30 + len(a_)))
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(a_) # append x to stack
# output in tabular format
print(x.rjust(8) , ('''push(''' + x + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
else:
lowerCamelCase :Optional[Any] = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8) , ('''pop(''' + b + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
lowerCamelCase :str = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8) , ('''pop(''' + a + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''')
stack.append(
str(opr[x](int(a_) , int(a_)))) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8) , ('''push(''' + a + x + b + ''')''').ljust(12) , ''','''.join(a_) , sep=''' | ''' , )
return int(stack[0])
if __name__ == "__main__":
A__ = input("""\n\nEnter a Postfix Equation (space separated) = """).split(""" """)
print("""\n\tResult = """, solve(Postfix))
from math import sqrt
def _lowerCamelCase ( a_ : int):
lowerCamelCase :List[Any] = 0
for i in range(1 , int(sqrt(a_) + 1)):
if n % i == 0 and i != sqrt(a_):
total += i + n // i
elif i == sqrt(a_):
total += i
return total - n
def _lowerCamelCase ( a_ : int = 1_00_00):
lowerCamelCase :int = sum(
i
for i in range(1 , a_)
if sum_of_divisors(sum_of_divisors(a_)) == i and sum_of_divisors(a_) != i)
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
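# Sanity check for the intended computation (amicable numbers below n): the
# proper-divisor sums d(220) = 284 and d(284) = 220 form the classic amicable
# pair, and both members should be counted by solution(10000).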
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
A__ = """Hello, World!"""
A__ = """en_XX"""
def _lowerCamelCase ( a_ : str , a_ : str , a_ : bool):
lowerCamelCase :int = Path('''data_bin''')
lowerCamelCase :Union[str, Any] = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(a_).parent) , checkpoint_file=Path(a_).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(a_) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(a_).parent / '''sentencepiece.bpe.model''') , src_dict=str(data_dir / '''dict.txt''') , )
xmod.eval() # disable dropout
print(a_)
lowerCamelCase :Any = xmod.model.encoder.sentence_encoder
lowerCamelCase :List[str] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
lowerCamelCase :Dict = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our X-MOD config:''' , a_)
lowerCamelCase :List[Any] = XmodForSequenceClassification(a_) if classification_head else XmodForMaskedLM(a_)
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCamelCase :Union[str, Any] = xmod_sent_encoder.embed_tokens.weight
lowerCamelCase :Tuple = xmod_sent_encoder.embed_positions.weight
lowerCamelCase :List[str] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight) # just zero them out b/c xmod doesn't use them.
lowerCamelCase :List[Any] = xmod_sent_encoder.layernorm_embedding.weight
lowerCamelCase :Optional[int] = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers):
# Encoder: start of layer
lowerCamelCase :Union[str, Any] = model.roberta.encoder.layer[i]
lowerCamelCase :List[str] = xmod_sent_encoder.layers[i]
# self attention
lowerCamelCase :Optional[int] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size))
):
raise AssertionError('''Dimensions of self-attention weights do not match.''')
lowerCamelCase :Optional[int] = xmod_layer.self_attn.q_proj.weight
lowerCamelCase :List[str] = xmod_layer.self_attn.q_proj.bias
lowerCamelCase :str = xmod_layer.self_attn.k_proj.weight
lowerCamelCase :Optional[Any] = xmod_layer.self_attn.k_proj.bias
lowerCamelCase :Dict = xmod_layer.self_attn.v_proj.weight
lowerCamelCase :Optional[int] = xmod_layer.self_attn.v_proj.bias
# self-attention output
lowerCamelCase :Optional[int] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('''Dimensions of self-attention output weights do not match.''')
lowerCamelCase :List[Any] = xmod_layer.self_attn.out_proj.weight
lowerCamelCase :Union[str, Any] = xmod_layer.self_attn.out_proj.bias
lowerCamelCase :str = xmod_layer.self_attn_layer_norm.weight
lowerCamelCase :List[Any] = xmod_layer.self_attn_layer_norm.bias
# intermediate
lowerCamelCase :Optional[int] = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of intermediate weights do not match.''')
lowerCamelCase :int = xmod_layer.fca.weight
lowerCamelCase :Union[str, Any] = xmod_layer.fca.bias
# output
lowerCamelCase :List[str] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of feed-forward weights do not match.''')
lowerCamelCase :str = xmod_layer.fca.weight
lowerCamelCase :int = xmod_layer.fca.bias
lowerCamelCase :List[Any] = xmod_layer.final_layer_norm.weight
lowerCamelCase :List[str] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowerCamelCase :List[str] = xmod_layer.adapter_layer_norm.weight
lowerCamelCase :int = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
raise AssertionError('''Lists of language adapters do not match.''')
for lang_code, adapter in xmod_layer.adapter_modules.items():
lowerCamelCase :Optional[int] = bert_output.adapter_modules[lang_code]
lowerCamelCase :Dict = xmod_layer.adapter_modules[lang_code]
lowerCamelCase :List[Any] = from_adapter.fca.weight
lowerCamelCase :List[Any] = from_adapter.fca.bias
lowerCamelCase :Dict = from_adapter.fca.weight
lowerCamelCase :Optional[Any] = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowerCamelCase :Dict = xmod_sent_encoder.layer_norm.weight
lowerCamelCase :List[Any] = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowerCamelCase :Union[str, Any] = xmod.model.classification_heads['''mnli'''].dense.weight
lowerCamelCase :Tuple = xmod.model.classification_heads['''mnli'''].dense.bias
lowerCamelCase :Optional[Any] = xmod.model.classification_heads['''mnli'''].out_proj.weight
lowerCamelCase :List[Any] = xmod.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
lowerCamelCase :int = xmod.model.encoder.lm_head.dense.weight
lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.dense.bias
lowerCamelCase :Optional[int] = xmod.model.encoder.lm_head.layer_norm.weight
lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.layer_norm.bias
lowerCamelCase :List[Any] = xmod.model.encoder.lm_head.weight
lowerCamelCase :Any = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCamelCase :str = xmod.encode(a_).unsqueeze(0) # batch of size 1
model.roberta.set_default_language(a_)
lowerCamelCase :Any = model(a_)[0]
if classification_head:
lowerCamelCase :Dict = xmod.model.classification_heads['''mnli'''](xmod.extract_features(a_))
else:
lowerCamelCase :int = xmod.model(a_ , lang_id=[SAMPLE_LANGUAGE])[0]
print(our_output.shape , their_output.shape)
lowerCamelCase :List[str] = torch.max(torch.abs(our_output - their_output)).item()
print(F"max_absolute_diff = {max_absolute_diff}") # ~ 1e-7
lowerCamelCase :str = torch.allclose(a_ , a_ , atol=1e-3)
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''')
if not success:
raise Exception('''Something went wRoNg''')
Path(a_).mkdir(parents=a_ , exist_ok=a_)
print(F"Saving model to {pytorch_dump_folder_path}")
model.save_pretrained(a_)
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
A__ = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
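# Example invocation sketch (hypothetical script and paths; the flags are
# exactly the ones declared by the argparse block above):
#
#   python convert_xmod_checkpoint.py \
#       --xmod_checkpoint_path /path/to/checkpoint.pt \
#       --pytorch_dump_folder_path ./xmod-base \
#       --classification_head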
def _lowerCamelCase ( a_ : dict):
lowerCamelCase :set[int] = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
lowerCamelCase :set[int] = set()
return any(
node not in visited and depth_first_search(a_ , a_ , a_ , a_)
for node in graph)
def _lowerCamelCase ( a_ : dict , a_ : int , a_ : set , a_ : set):
visited.add(a_)
rec_stk.add(a_)
for node in graph[vertex]:
if node not in visited:
if depth_first_search(a_ , a_ , a_ , a_):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(a_)
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
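# Worked example of the intended check (the first function above is the entry
# point, hypothetically `check_cycle`): {0: [1], 1: [2], 2: [0]} contains the
# back edge 2 -> 0 and should report True; {0: [1], 1: [2], 2: []} should
# report False.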
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 'roberta-prelayernorm'
def __init__( self : str , __snake_case : List[str]=50265 , __snake_case : Union[str, Any]=768 , __snake_case : Tuple=12 , __snake_case : int=12 , __snake_case : Any=3072 , __snake_case : Optional[int]="gelu" , __snake_case : List[Any]=0.1 , __snake_case : int=0.1 , __snake_case : Union[str, Any]=512 , __snake_case : Dict=2 , __snake_case : int=0.0_2 , __snake_case : Any=1e-1_2 , __snake_case : Optional[int]=1 , __snake_case : Dict=0 , __snake_case : Optional[int]=2 , __snake_case : Any="absolute" , __snake_case : Union[str, Any]=True , __snake_case : List[str]=None , **__snake_case : Optional[int] , ):
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
lowerCamelCase :Optional[int] = vocab_size
lowerCamelCase :Dict = hidden_size
lowerCamelCase :Tuple = num_hidden_layers
lowerCamelCase :Optional[int] = num_attention_heads
lowerCamelCase :Any = hidden_act
lowerCamelCase :List[Any] = intermediate_size
lowerCamelCase :Union[str, Any] = hidden_dropout_prob
lowerCamelCase :str = attention_probs_dropout_prob
lowerCamelCase :Tuple = max_position_embeddings
lowerCamelCase :int = type_vocab_size
lowerCamelCase :Optional[Any] = initializer_range
lowerCamelCase :Union[str, Any] = layer_norm_eps
lowerCamelCase :Dict = position_embedding_type
lowerCamelCase :List[Any] = use_cache
lowerCamelCase :Optional[int] = classifier_dropout
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
@property
def snake_case ( self : Any ):
if self.task == "multiple-choice":
lowerCamelCase :Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCamelCase :List[str] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
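# Note (descriptive): the OnnxConfig subclass above only declares dynamic input
# axes; for "multiple-choice" tasks a third `choice` axis sits between batch
# and sequence, otherwise inputs are (batch, sequence).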
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase : List[Any] = {
"""configuration_blenderbot""": [
"""BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotConfig""",
"""BlenderbotOnnxConfig""",
],
"""tokenization_blenderbot""": ["""BlenderbotTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[str] = ["""BlenderbotTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = [
"""BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotForCausalLM""",
"""BlenderbotForConditionalGeneration""",
"""BlenderbotModel""",
"""BlenderbotPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = [
"""TFBlenderbotForConditionalGeneration""",
"""TFBlenderbotModel""",
"""TFBlenderbotPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = [
"""FlaxBlenderbotForConditionalGeneration""",
"""FlaxBlenderbotModel""",
"""FlaxBlenderbotPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
_lowercase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase__( unittest.TestCase ):
def __init__( self : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : List[Any]=3 , lowerCAmelCase : Tuple=32 , lowerCAmelCase : Optional[int]=3 , lowerCAmelCase : Optional[Any]=10 , lowerCAmelCase : Optional[Any]=[10, 20, 30, 40] , lowerCAmelCase : Optional[int]=[1, 1, 2, 1] , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : Tuple=True , lowerCAmelCase : List[Any]="relu" , lowerCAmelCase : List[Any]=3 , lowerCAmelCase : Union[str, Any]=None , )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = num_channels
UpperCAmelCase = embeddings_size
UpperCAmelCase = hidden_sizes
UpperCAmelCase = depths
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = hidden_act
UpperCAmelCase = num_labels
UpperCAmelCase = scope
UpperCAmelCase = len(lowerCAmelCase )
def a__( self : Optional[Any] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = self.get_config()
return config, pixel_values
def a__( self : str )-> Optional[Any]:
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def a__( self : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Any )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = FlaxRegNetModel(config=lowerCAmelCase )
UpperCAmelCase = model(lowerCAmelCase )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__( self : List[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = self.num_labels
UpperCAmelCase = FlaxRegNetForImageClassification(config=lowerCAmelCase )
UpperCAmelCase = model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__( self : List[str] )-> Any:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class UpperCamelCase__( lowerCAmelCase , unittest.TestCase ):
__magic_name__ : Optional[int] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
__magic_name__ : Optional[int] = False
__magic_name__ : List[str] = False
__magic_name__ : Dict = False
def a__( self : Union[str, Any] )-> None:
"""simple docstring"""
UpperCAmelCase = FlaxRegNetModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase )
def a__( self : List[str] )-> List[str]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__( self : Tuple )-> Tuple:
"""simple docstring"""
return
def a__( self : Optional[Any] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def a__( self : Any )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def a__( self : str )-> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def a__( self : Any )-> List[str]:
"""simple docstring"""
pass
def a__( self : Any )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def a__( self : Tuple )-> int:
"""simple docstring"""
def check_hidden_states_output(lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ):
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase ) , expected_num_stages + 1 )
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def a__( self : Union[str, Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = model_class(lowerCAmelCase )
@jax.jit
def model_jitted(lowerCAmelCase : Tuple , **lowerCAmelCase : Tuple ):
return model(pixel_values=lowerCAmelCase , **lowerCAmelCase )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase = model_jitted(**lowerCAmelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase = model_jitted(**lowerCAmelCase ).to_tuple()
self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) )
for jitted_output, output in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_flax
class UpperCamelCase__( unittest.TestCase ):
@cached_property
def a__( self : Dict )-> int:
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None
@slow
def a__( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=lowerCAmelCase , return_tensors='''np''' )
UpperCAmelCase = model(**lowerCAmelCase )
# verify the logits
UpperCAmelCase = (1, 1000)
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
UpperCAmelCase = jnp.array([-0.4180, -1.5051, -3.4836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
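# A minimal standalone illustration of the JIT-vs-eager equivalence pattern the
# test above relies on (toy function, not the RegNet model itself):
#
#   import jax
#   import jax.numpy as jnp
#
#   def f(x):
#       return jnp.tanh(x) * 2.0
#
#   x = jnp.ones((2, 3))
#   assert jnp.allclose(jax.jit(f)(x), f(x))  # jitted and eager agree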