"""simple docstring"""
from sklearn.metrics import matthews_corrcoef
import datasets
_UpperCamelCase : Tuple = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
_UpperCamelCase : List[Any] = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
_UpperCamelCase : int = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'
] , )
def _UpperCAmelCase ( self , a , a , a=None ) -> str:
return {
"matthews_correlation": float(matthews_corrcoef(a , a , sample_weight=a ) ),
}
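# A minimal usage sketch for the metric above. It assumes a `datasets` version that
# still ships `load_metric` (newer releases moved metrics to the `evaluate` library).
metric = datasets.load_metric("matthews_correlation")
results = metric.compute(references=[1, 3, 2, 0, 3, 2], predictions=[1, 2, 2, 0, 3, 3])
print(round(results["matthews_correlation"], 2))  # 0.54, matching the docstring example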
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class UpperCAmelCase_ ( _a):
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=False , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Any:
lowercase__ : Tuple = parent
lowercase__ : List[Any] = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : List[Any] = is_training
lowercase__ : Optional[Any] = use_input_mask
lowercase__ : Optional[int] = use_token_type_ids
lowercase__ : int = use_labels
lowercase__ : Tuple = vocab_size
lowercase__ : int = hidden_size
lowercase__ : Any = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : Optional[Any] = intermediate_size
lowercase__ : Optional[Any] = hidden_act
lowercase__ : List[str] = hidden_dropout_prob
lowercase__ : List[Any] = attention_probs_dropout_prob
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : List[str] = type_vocab_size
lowercase__ : Tuple = type_sequence_label_size
lowercase__ : List[Any] = initializer_range
lowercase__ : str = num_labels
lowercase__ : Tuple = num_choices
lowercase__ : str = scope
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = None
if self.use_input_mask:
lowercase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Dict = None
lowercase__ : Optional[Any] = None
lowercase__ : int = None
if self.use_labels:
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : List[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self ) -> Optional[int]:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict:
lowercase__ : Tuple = DistilBertModel(config=a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , a )
lowercase__ : str = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict:
lowercase__ : Optional[int] = DistilBertForMaskedLM(config=a )
model.to(a )
model.eval()
lowercase__ : Union[str, Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> int:
lowercase__ : Tuple = DistilBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
lowercase__ : Tuple = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> List[str]:
lowercase__ : int = self.num_labels
lowercase__ : Dict = DistilBertForSequenceClassification(a )
model.to(a )
model.eval()
lowercase__ : Optional[Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Any:
lowercase__ : Any = self.num_labels
lowercase__ : List[str] = DistilBertForTokenClassification(config=a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Tuple:
lowercase__ : List[Any] = self.num_choices
lowercase__ : Any = DistilBertForMultipleChoice(config=a )
model.to(a )
model.eval()
lowercase__ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : int = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Union[str, Any] = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__)) : List[str] = config_and_inputs
lowercase__ : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : List[str] = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__ : str = (
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : Any = True
lowerCamelCase__ : List[Any] = True
lowerCamelCase__ : Optional[Any] = True
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : str = DistilBertModelTester(self )
lowercase__ : int = ConfigTester(self , config_class=a , dim=3_7 )
def _UpperCAmelCase ( self ) -> Dict:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = DistilBertModel.from_pretrained(a )
self.assertIsNotNone(a )
@slow
@require_torch_gpu
def _UpperCAmelCase ( self ) -> Any:
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
lowercase__ : Optional[int] = True
lowercase__ : Union[str, Any] = model_class(config=a )
lowercase__ : int = self._prepare_for_class(a , a )
lowercase__ : Tuple = torch.jit.trace(
a , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , 'traced_model.pt' ) )
lowercase__ : Optional[int] = torch.jit.load(os.path.join(a , 'traced_model.pt' ) , map_location=a )
loaded(inputs_dict['input_ids'].to(a ) , inputs_dict['attention_mask'].to(a ) )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : int = DistilBertModel.from_pretrained('distilbert-base-uncased' )
lowercase__ : Union[str, Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowercase__ : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase__ : Optional[Any] = model(a , attention_mask=a )[0]
lowercase__ : Tuple = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , a )
lowercase__ : List[Any] = torch.tensor(
[[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
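# A minimal, self-contained sketch of what the tester above exercises: run a
# randomly initialized DistilBertModel on random token ids. Shapes mirror the
# tester defaults; no pretrained weights are downloaded.
if is_torch_available():
    sketch_config = DistilBertConfig(vocab_size=99, dim=32, n_layers=5, n_heads=4, hidden_dim=37)
    sketch_model = DistilBertModel(sketch_config)
    sketch_model.eval()
    sketch_input_ids = torch.randint(0, 99, (13, 7))
    with torch.no_grad():
        sketch_hidden = sketch_model(sketch_input_ids).last_hidden_state
    print(sketch_hidden.shape)  # torch.Size([13, 7, 32])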
"""simple docstring"""
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
_UpperCamelCase : Dict = logging.getLogger(__name__)
class UpperCAmelCase_ ( _a):
def __init__( self , a , a , a , a=None ) -> Optional[int]:
super().__init__(
a , question_encoder_tokenizer=a , generator_tokenizer=a , index=a , init_retrieval=a , )
lowercase__ : Any = None
def _UpperCAmelCase ( self , a ) -> int:
logger.info('initializing retrieval' )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info('dist initialized' )
# needs to be set manually
lowercase__ : int = self._infer_socket_ifname()
# avoid clash with the NCCL port
lowercase__ : int = str(distributed_port + 1 )
lowercase__ : str = dist.new_group(ranks=a , backend='gloo' )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info('dist not initialized / main' )
self.index.init_index()
# all processes wait untill the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def _UpperCAmelCase ( self ) -> Optional[Any]:
return dist.get_rank(group=self.process_group ) == 0
def _UpperCAmelCase ( self , a , a , a=torch.floataa ) -> str:
lowercase__ : str = torch.empty(a , dtype=a )
dist.scatter(a , src=0 , scatter_list=a , group=self.process_group )
return target_tensor
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Optional[int] = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
lowercase__ : List[str] = next((addr for addr in addrs if addr.startswith('e' )) , a )
return ifname
def _UpperCAmelCase ( self , a , a ) -> Tuple[np.ndarray, List[dict]]:
# single GPU training
if not dist.is_initialized():
lowercase__ , lowercase__ : Tuple = self._main_retrieve(a , a )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(a )
# distributed training
lowercase__ : List[Any] = dist.get_world_size(group=self.process_group )
# gather logic
lowercase__ : List[Any] = None
if self._is_main():
lowercase__ : str = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(a )]
dist.gather(torch.tensor(a ) , dst=0 , gather_list=a , group=self.process_group )
# scatter logic
lowercase__ : List[str] = question_hidden_states.shape[0]
lowercase__ : List[str] = []
lowercase__ : List[str] = []
if self._is_main():
assert len(a ) == world_size
lowercase__ , lowercase__ : List[str] = self._main_retrieve(torch.cat(a ).numpy() , a )
lowercase__ , lowercase__ : Any = torch.tensor(a ), torch.tensor(a )
lowercase__ : Any = self._chunk_tensor(a , a )
lowercase__ : Tuple = self._chunk_tensor(a , a )
lowercase__ : int = self._scattered(a , [n_queries, n_docs] , target_type=torch.intaa )
lowercase__ : List[str] = self._scattered(a , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(a )
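# `_chunk_tensor` is inherited from the base retriever and is not shown above. A
# minimal sketch of a compatible helper, assuming it only needs to split the
# gathered results back into per-worker chunks along dim 0 (illustrative name and
# behavior, not the library implementation):
def _chunk_tensor_sketch(t: torch.Tensor, chunk_size: int) -> list:
    # one chunk of `chunk_size` rows per worker, in rank order
    return list(torch.split(t, chunk_size, dim=0))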
"""simple docstring"""
from __future__ import annotations
def a_ ( _lowerCAmelCase : float , _lowerCAmelCase : float , _lowerCAmelCase : float , ):
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
def a_ ( _lowerCAmelCase : list ):
'''simple docstring'''
if not grid or not grid[0]:
raise TypeError('The grid does not contain the appropriate information' )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
lowercase__ : List[Any] = grid[0]
for row_n in range(1 , len(_lowerCAmelCase ) ):
lowercase__ : Any = grid[row_n]
lowercase__ : List[Any] = fill_row(_lowerCAmelCase , _lowerCAmelCase )
lowercase__ : int = grid[row_n]
return grid[-1][-1]
def a_ ( _lowerCAmelCase : list , _lowerCAmelCase : list ):
'''simple docstring'''
current_row[0] += row_above[0]
for cell_n in range(1 , len(_lowerCAmelCase ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
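# Quick worked example for the functions above (values verified by hand): the
# cheapest path through this 2x3 grid is 1 -> 2 -> 3 -> 6, which sums to 12.
print(min_path_sum([[1, 2, 3], [4, 5, 6]]))  # 12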
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=[3_0, 3_0] , a=2 , a=3 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=3 , a=None , a=8 , a=1_0 , ) -> Any:
lowercase__ : List[str] = parent
lowercase__ : Optional[Any] = batch_size
lowercase__ : Optional[int] = image_size
lowercase__ : List[Any] = patch_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : str = is_training
lowercase__ : Optional[Any] = use_labels
lowercase__ : Optional[Any] = hidden_size
lowercase__ : Dict = num_hidden_layers
lowercase__ : Optional[Any] = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : List[Any] = hidden_act
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : Any = attention_probs_dropout_prob
lowercase__ : Any = type_sequence_label_size
lowercase__ : Dict = initializer_range
lowercase__ : Union[str, Any] = num_labels
lowercase__ : Tuple = scope
lowercase__ : Tuple = n_targets
lowercase__ : Optional[int] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
lowercase__ : Optional[Any] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
lowercase__ : Tuple = num_patches + 1 + self.num_detection_tokens
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowercase__ : Tuple = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowercase__ : int = []
for i in range(self.batch_size ):
lowercase__ : Optional[Any] = {}
lowercase__ : Any = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=a )
lowercase__ : List[str] = torch.rand(self.n_targets , 4 , device=a )
labels.append(a )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> List[Any]:
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def _UpperCAmelCase ( self , a , a , a ) -> int:
lowercase__ : List[str] = YolosModel(config=a )
model.to(a )
model.eval()
lowercase__ : List[Any] = model(a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a ) -> Union[str, Any]:
lowercase__ : str = YolosForObjectDetection(a )
model.to(a )
model.eval()
lowercase__ : Dict = model(pixel_values=a )
lowercase__ : Tuple = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
lowercase__ : str = model(pixel_values=a , labels=a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : int = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Any = config_and_inputs
lowercase__ : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Optional[int] = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCamelCase__ : List[str] = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
lowerCamelCase__ : List[Any] = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : Union[str, Any] = False
def _UpperCAmelCase ( self , a , a , a=False ) -> Dict:
lowercase__ : List[str] = super()._prepare_for_class(a , a , return_labels=a )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
lowercase__ : Optional[Any] = []
for i in range(self.model_tester.batch_size ):
lowercase__ : Dict = {}
lowercase__ : Dict = torch.ones(
size=(self.model_tester.n_targets,) , device=a , dtype=torch.long )
lowercase__ : Optional[Any] = torch.ones(
self.model_tester.n_targets , 4 , device=a , dtype=torch.float )
labels.append(a )
lowercase__ : Union[str, Any] = labels
return inputs_dict
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Dict = YolosModelTester(self )
lowercase__ : Optional[int] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 )
def _UpperCAmelCase ( self ) -> str:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
# YOLOS does not use inputs_embeds
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : int = model_class(a )
lowercase__ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Tuple = [*signature.parameters.keys()]
lowercase__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Dict = True
# in YOLOS, the seq_len is different
lowercase__ : Tuple = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = True
lowercase__ : str = False
lowercase__ : str = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Any = model(**self._prepare_for_class(a , a ) )
lowercase__ : str = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : Optional[int] = True
lowercase__ : List[Any] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : List[str] = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowercase__ : Dict = len(a )
# Check attention is always last and order is fine
lowercase__ : Any = True
lowercase__ : int = True
lowercase__ : int = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Any = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[Any] = 1
self.assertEqual(out_len + added_hidden_states , len(a ) )
lowercase__ : Tuple = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _UpperCAmelCase ( self ) -> List[str]:
def check_hidden_states_output(a , a , a ):
lowercase__ : str = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : int = model(**self._prepare_for_class(a , a ) )
lowercase__ : int = outputs.hidden_states
lowercase__ : Any = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(a ) , a )
# YOLOS has a different seq_length
lowercase__ : Optional[int] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Any = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : List[Any] = True
check_hidden_states_output(a , a , a )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*a )
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : int = YolosModel.from_pretrained(a )
self.assertIsNotNone(a )
def a_ ( ):
'''simple docstring'''
lowercase__ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return AutoImageProcessor.from_pretrained('hustvl/yolos-small' ) if is_vision_available() else None
@slow
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = YolosForObjectDetection.from_pretrained('hustvl/yolos-small' ).to(a )
lowercase__ : Tuple = self.default_image_processor
lowercase__ : Optional[int] = prepare_img()
lowercase__ : int = image_processor(images=a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : int = model(inputs.pixel_values )
# verify outputs
lowercase__ : Tuple = torch.Size((1, 1_0_0, 9_2) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ : Any = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] , device=a , )
lowercase__ : List[str] = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] , device=a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , a , atol=1e-4 ) )
# verify postprocessing
lowercase__ : Optional[Any] = image_processor.post_process_object_detection(
a , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
lowercase__ : str = torch.tensor([0.9_994, 0.9_790, 0.9_964, 0.9_972, 0.9_861] ).to(a )
lowercase__ : Any = [7_5, 7_5, 1_7, 6_3, 1_7]
lowercase__ : Optional[int] = torch.tensor([335.0_609, 79.3_848, 375.4_216, 187.2_495] ).to(a )
self.assertEqual(len(results['scores'] ) , 5 )
self.assertTrue(torch.allclose(results['scores'] , a , atol=1e-4 ) )
self.assertSequenceEqual(results['labels'].tolist() , a )
self.assertTrue(torch.allclose(results['boxes'][0, :] , a ) )
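# Sanity check of the sequence-length bookkeeping in the tester above
# (illustrative arithmetic only): with the defaults image_size=[30, 30],
# patch_size=2 and num_detection_tokens=10,
sketch_num_patches = (30 // 2) * (30 // 2)  # 225 patches
sketch_expected_seq_len = sketch_num_patches + 1 + 10  # 225 + [CLS] + 10 detection tokens = 236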
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class UpperCAmelCase_ ( _a):
def __init__( self , a , a , a = None , a = None , a = False , **a , ) -> Tuple:
super().__init__(features=a , cache_dir=a , keep_in_memory=a , **a )
lowercase__ : str = Sql(
cache_dir=a , features=a , sql=a , con=a , **a , )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Optional[Any] = None
lowercase__ : Optional[Any] = None
lowercase__ : Optional[int] = None
lowercase__ : Optional[Any] = None
self.builder.download_and_prepare(
download_config=a , download_mode=a , verification_mode=a , base_path=a , )
# Build dataset for splits
lowercase__ : Union[str, Any] = self.builder.as_dataset(
split='train' , verification_mode=a , in_memory=self.keep_in_memory )
return dataset
class UpperCAmelCase_ :
def __init__( self , a , a , a , a = None , a = None , **a , ) -> Dict:
if num_proc is not None and num_proc <= 0:
raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
lowercase__ : Optional[int] = dataset
lowercase__ : Any = name
lowercase__ : Tuple = con
lowercase__ : Optional[int] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
lowercase__ : Union[str, Any] = num_proc
lowercase__ : List[str] = to_sql_kwargs
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Tuple = self.to_sql_kwargs.pop('sql' , a )
lowercase__ : List[str] = self.to_sql_kwargs.pop('con' , a )
lowercase__ : int = self.to_sql_kwargs.pop('index' , a )
lowercase__ : str = self._write(index=a , **self.to_sql_kwargs )
return written
def _UpperCAmelCase ( self , a ) -> List[str]:
lowercase__ , lowercase__ , lowercase__ : List[str] = args
lowercase__ : List[Any] = {**to_sql_kwargs, 'if_exists': 'append'} if offset > 0 else to_sql_kwargs
lowercase__ : str = query_table(
table=self.dataset.data , key=slice(a , offset + self.batch_size ) , indices=self.dataset._indices , )
lowercase__ : Optional[Any] = batch.to_pandas()
lowercase__ : Optional[Any] = df.to_sql(self.name , self.con , index=a , **a )
return num_rows or len(a )
def _UpperCAmelCase ( self , a , **a ) -> int:
lowercase__ : Union[str, Any] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating SQL from Arrow format' , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
lowercase__ , lowercase__ : Union[str, Any] = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , a , a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating SQL from Arrow format' , ):
written += num_rows
return written
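# A minimal usage sketch for the classes above via the public entry points that
# wrap them (`Dataset.from_sql` / `Dataset.to_sql`); the table name and SQLite
# URI are illustrative:
from datasets import Dataset as _Dataset

ds = _Dataset.from_sql("SELECT * FROM items", con="sqlite:///data.db")  # read a table into a Dataset
ds.to_sql("items_copy", con="sqlite:///data.db")  # write it back out to a new SQL table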
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
_UpperCamelCase : int = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase_ :
def __init__( self , a=False , a=False , a=6.0 , a=None , a=False , a=False , a=None , a="fp4" , a=False , **a , ) -> Tuple:
lowercase__ : str = load_in_abit
lowercase__ : str = load_in_abit
lowercase__ : List[str] = llm_inta_threshold
lowercase__ : Dict = llm_inta_skip_modules
lowercase__ : Tuple = llm_inta_enable_fpaa_cpu_offload
lowercase__ : Any = llm_inta_has_fpaa_weight
lowercase__ : Any = bnb_abit_quant_type
lowercase__ : Dict = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
lowercase__ : Dict = torch.floataa
elif isinstance(a , a ):
lowercase__ : Any = getattr(a , a )
elif isinstance(a , torch.dtype ):
lowercase__ : Any = bnb_abit_compute_dtype
else:
raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype' )
self.post_init()
def _UpperCAmelCase ( self ) -> str:
if not isinstance(self.llm_inta_threshold , a ):
raise ValueError('llm_int8_threshold must be a float' )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , a ):
raise ValueError('llm_int8_skip_modules must be a list of strings' )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , a ):
raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean' )
if not isinstance(self.llm_inta_has_fpaa_weight , a ):
raise ValueError('llm_int8_has_fp16_weight must be a boolean' )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError('bnb_4bit_compute_dtype must be torch.dtype' )
if not isinstance(self.bnb_abit_quant_type , a ):
raise ValueError('bnb_4bit_quant_type must be a string' )
if not isinstance(self.bnb_abit_use_double_quant , a ):
raise ValueError('bnb_4bit_use_double_quant must be a boolean' )
if self.load_in_abit and not version.parse(importlib.metadata.version('bitsandbytes' ) ) >= version.parse(
'0.39.0' ):
raise ValueError(
'4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version' )
def _UpperCAmelCase ( self ) -> Tuple:
return self.load_in_abit or self.load_in_abit
def _UpperCAmelCase ( self ) -> List[str]:
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def _UpperCAmelCase ( cls , a , a , **a ) -> Optional[Any]:
lowercase__ : List[Any] = cls(**a )
lowercase__ : Union[str, Any] = []
for key, value in kwargs.items():
if hasattr(a , a ):
setattr(a , a , a )
to_remove.append(a )
for key in to_remove:
kwargs.pop(a , a )
if return_unused_kwargs:
return config, kwargs
else:
return config
def _UpperCAmelCase ( self , a ) -> Dict:
with open(a , 'w' , encoding='utf-8' ) as writer:
lowercase__ : Any = self.to_dict()
lowercase__ : str = json.dumps(a , indent=2 , sort_keys=a ) + '\n'
writer.write(a )
def _UpperCAmelCase ( self ) -> Dict[str, Any]:
lowercase__ : Optional[Any] = copy.deepcopy(self.__dict__ )
lowercase__ : Any = str(output['bnb_4bit_compute_dtype'] ).split('.' )[1]
return output
def __repr__( self ) -> Dict:
return f"""{self.__class__.__name__} {self.to_json_string()}"""
def _UpperCAmelCase ( self , a = True ) -> str:
if use_diff is True:
lowercase__ : List[Any] = self.to_diff_dict()
else:
lowercase__ : List[str] = self.to_dict()
return json.dumps(a , indent=2 , sort_keys=a ) + "\n"
def _UpperCAmelCase ( self ) -> Dict[str, Any]:
lowercase__ : Tuple = self.to_dict()
# get the default config dict
lowercase__ : Optional[Any] = BitsAndBytesConfig().to_dict()
lowercase__ : int = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
lowercase__ : Optional[int] = value
return serializable_config_dict
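# A minimal usage sketch (the model id is illustrative): pass the config to
# `from_pretrained` to load a model quantized with bitsandbytes.
from transformers import AutoModelForCausalLM

bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype="bfloat16")
model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", quantization_config=bnb_config)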
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
_UpperCamelCase : Optional[int] = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
_UpperCamelCase : Optional[int] = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def a_ ( ):
'''simple docstring'''
lowercase__ : int = calculate_rouge(_lowerCAmelCase , _lowerCAmelCase , bootstrap_aggregation=_lowerCAmelCase , rouge_keys=['rouge2', 'rougeL'] )
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
lowercase__ : Optional[Any] = calculate_rouge(_lowerCAmelCase , _lowerCAmelCase , bootstrap_aggregation=_lowerCAmelCase , rouge_keys=['rouge2'] )
assert (
pd.DataFrame(no_aggregation['rouge2'] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra['rouge2'] ).fmeasure.mean()
)
def a_ ( ):
'''simple docstring'''
lowercase__ : Optional[Any] = 'rougeLsum'
lowercase__ : int = calculate_rouge(_lowerCAmelCase , _lowerCAmelCase , newline_sep=_lowerCAmelCase , rouge_keys=[k] )[k]
lowercase__ : Tuple = calculate_rouge(_lowerCAmelCase , _lowerCAmelCase , newline_sep=_lowerCAmelCase , rouge_keys=[k] )[k]
assert score > score_no_sep
def a_ ( ):
'''simple docstring'''
lowercase__ : int = ['rouge1', 'rouge2', 'rougeL']
lowercase__ : str = calculate_rouge(_lowerCAmelCase , _lowerCAmelCase , newline_sep=_lowerCAmelCase , rouge_keys=_lowerCAmelCase )
lowercase__ : Optional[Any] = calculate_rouge(_lowerCAmelCase , _lowerCAmelCase , newline_sep=_lowerCAmelCase , rouge_keys=_lowerCAmelCase )
assert score_sep == score_no_sep
def a_ ( ):
'''simple docstring'''
lowercase__ : List[str] = [
'Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.',
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
]
lowercase__ : Optional[int] = [
'Margot Frank, died in 1945, a month earlier than previously thought.',
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
' the final seconds on board Flight 9525.',
]
assert calculate_rouge(_lowerCAmelCase , _lowerCAmelCase , newline_sep=_lowerCAmelCase ) == calculate_rouge(_lowerCAmelCase , _lowerCAmelCase , newline_sep=_lowerCAmelCase )
def a_ ( ):
'''simple docstring'''
lowercase__ : Tuple = [
'" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '
]
lowercase__ : Optional[Any] = [
' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'
]
lowercase__ : Tuple = calculate_rouge(_lowerCAmelCase , _lowerCAmelCase , rouge_keys=['rougeLsum'] , newline_sep=_lowerCAmelCase )['rougeLsum']
lowercase__ : List[str] = calculate_rouge(_lowerCAmelCase , _lowerCAmelCase , rouge_keys=['rougeLsum'] )['rougeLsum']
assert new_score > prev_score
def a_ ( ):
'''simple docstring'''
lowercase__ : str = Path('examples/seq2seq/test_data/wmt_en_ro' )
lowercase__ : Any = calculate_rouge_path(data_dir.joinpath('test.source' ) , data_dir.joinpath('test.target' ) )
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
lowercase__ : Tuple = calculate_rouge_path(
data_dir.joinpath('test.source' ) , data_dir.joinpath('test.target' ) , bootstrap_aggregation=_lowerCAmelCase )
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_UpperCamelCase : int = 16
_UpperCamelCase : Union[str, Any] = 32
def a_ ( _lowerCAmelCase : Tuple ):
'''simple docstring'''
return int(x / 2**20 )
class UpperCAmelCase_ :
def __enter__( self ) -> Union[str, Any]:
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
lowercase__ : List[str] = torch.cuda.memory_allocated()
return self
def __exit__( self , *a ) -> Any:
gc.collect()
torch.cuda.empty_cache()
lowercase__ : Optional[Any] = torch.cuda.memory_allocated()
lowercase__ : Union[str, Any] = torch.cuda.max_memory_allocated()
lowercase__ : List[Any] = bamb(self.end - self.begin )
lowercase__ : List[Any] = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def a_ ( _lowerCAmelCase : Accelerator , _lowerCAmelCase : int = 16 , _lowerCAmelCase : str = "bert-base-cased" , _lowerCAmelCase : int = 320 , _lowerCAmelCase : int = 160 , ):
'''simple docstring'''
lowercase__ : List[Any] = AutoTokenizer.from_pretrained(_lowerCAmelCase )
lowercase__ : Union[str, Any] = load_dataset(
'glue' , 'mrpc' , split={'train': f"""train[:{n_train}]""", 'validation': f"""validation[:{n_val}]"""} )
def tokenize_function(_lowerCAmelCase : int ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : List[str] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase__ : Union[str, Any] = datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_lowerCAmelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ : Union[str, Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_lowerCAmelCase : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowerCAmelCase , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(_lowerCAmelCase , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
lowercase__ : Dict = DataLoader(
tokenized_datasets['train'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
lowercase__ : Dict = DataLoader(
tokenized_datasets['validation'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
return train_dataloader, eval_dataloader
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : List[str] ):
'''simple docstring'''
lowercase__ : List[Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ : Optional[int] = config['lr']
lowercase__ : Optional[Any] = int(config['num_epochs'] )
lowercase__ : Optional[Any] = int(config['seed'] )
lowercase__ : int = int(config['batch_size'] )
lowercase__ : Union[str, Any] = args.model_name_or_path
set_seed(_lowerCAmelCase )
lowercase__ , lowercase__ : Tuple = get_dataloaders(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ : List[Any] = AutoModelForSequenceClassification.from_pretrained(_lowerCAmelCase , return_dict=_lowerCAmelCase )
# Instantiate optimizer
lowercase__ : List[Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowercase__ : Optional[Any] = optimizer_cls(params=model.parameters() , lr=_lowerCAmelCase )
if accelerator.state.deepspeed_plugin is not None:
lowercase__ : Optional[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
lowercase__ : List[Any] = 1
lowercase__ : List[Any] = (len(_lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowercase__ : Optional[int] = get_linear_schedule_with_warmup(
optimizer=_lowerCAmelCase , num_warmup_steps=0 , num_training_steps=_lowerCAmelCase , )
else:
lowercase__ : Tuple = DummyScheduler(_lowerCAmelCase , total_num_steps=_lowerCAmelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = accelerator.prepare(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# We need to keep track of how many total steps we have iterated over
lowercase__ : Optional[int] = 0
# We also need to keep track of the stating epoch so files are named properly
lowercase__ : Tuple = 0
# Now we train the model
lowercase__ : Optional[Any] = {}
for epoch in range(_lowerCAmelCase , _lowerCAmelCase ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(_lowerCAmelCase ):
lowercase__ : List[Any] = model(**_lowerCAmelCase )
lowercase__ : Dict = outputs.loss
lowercase__ : int = loss / gradient_accumulation_steps
accelerator.backward(_lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) )
accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) )
accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) )
accelerator.print(
'Total Peak Memory consumed during the train (max): {}'.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
lowercase__ : int = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
def a_ ( ):
'''simple docstring'''
lowercase__ : int = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=_lowerCAmelCase , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_lowerCAmelCase , )
parser.add_argument(
'--output_dir' , type=_lowerCAmelCase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--peak_memory_upper_bound' , type=_lowerCAmelCase , default=_lowerCAmelCase , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , )
parser.add_argument(
'--n_train' , type=_lowerCAmelCase , default=320 , help='Number of training examples to use.' , )
parser.add_argument(
'--n_val' , type=_lowerCAmelCase , default=160 , help='Number of validation examples to use.' , )
parser.add_argument(
'--num_epochs' , type=_lowerCAmelCase , default=1 , help='Number of train epochs.' , )
lowercase__ : Any = parser.parse_args()
lowercase__ : Optional[Any] = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(_lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
main()
"""simple docstring"""
_UpperCamelCase : int = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : Optional[int] = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
number //= 10_0000
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
_UpperCamelCase : list[bool | None] = [None] * 10_00_00_00
_UpperCamelCase : Optional[int] = True
_UpperCamelCase : List[Any] = False
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
lowercase__ : Tuple = chain(next_number(_lowerCAmelCase ) )
lowercase__ : List[Any] = number_chain
while number < 1000_0000:
lowercase__ : Tuple = number_chain
number *= 10
return number_chain
def a_ ( _lowerCAmelCase : int = 1000_0000 ):
'''simple docstring'''
for i in range(1 , _lowerCAmelCase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(_lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{solution() = }''')
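# Worked example for `next_number` (arithmetic verified by hand): starting from 44,
# 4**2 + 4**2 = 32, then 3**2 + 2**2 = 13, 1 + 9 = 10, 1 + 0 = 1, so 44 arrives at 1.
print(next_number(44))  # 32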
"""simple docstring"""
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : Any = [0] * len(_lowerCAmelCase )
for i in range(1 , len(_lowerCAmelCase ) ):
# use last results for better performance - dynamic programming
lowercase__ : List[str] = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
lowercase__ : Dict = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
lowercase__ : Union[str, Any] = j
return prefix_result
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
return max(prefix_function(_lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
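# Worked example (verified by hand): in "abcab", the prefix "ab" reappears as the
# suffix, so the prefix function ends at 2.
print(prefix_function("abcab"))  # [0, 0, 0, 1, 2]
print(longest_prefix("abcab"))  # 2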
"""simple docstring"""
_UpperCamelCase : str = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def a_ ( _lowerCAmelCase : bytes ):
'''simple docstring'''
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
lowercase__ : Optional[int] = f"""a bytes-like object is required, not '{data.__class__.__name__}'"""
raise TypeError(_lowerCAmelCase )
lowercase__ : str = ''.join(bin(_lowerCAmelCase )[2:].zfill(8 ) for byte in data )
lowercase__ : str = len(_lowerCAmelCase ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase__ : Dict = B'=' * ((6 - len(_lowerCAmelCase ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(_lowerCAmelCase ) % 6)
else:
lowercase__ : str = B''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(_lowerCAmelCase ) , 6 ) ).encode()
+ padding
)
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) and not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
lowercase__ : str = (
'argument should be a bytes-like object or ASCII string, '
f"""not '{encoded_data.__class__.__name__}'"""
)
raise TypeError(_lowerCAmelCase )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
try:
lowercase__ : int = encoded_data.decode('utf-8' )
except UnicodeDecodeError:
raise ValueError('base64 encoded data should only contain ASCII characters' )
lowercase__ : List[Any] = encoded_data.count('=' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(_lowerCAmelCase ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase__ : Tuple = encoded_data[:-padding]
lowercase__ : Optional[int] = ''.join(
bin(B64_CHARSET.index(_lowerCAmelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase__ : List[str] = ''.join(
bin(B64_CHARSET.index(_lowerCAmelCase ) )[2:].zfill(6 ) for char in encoded_data )
lowercase__ : Any = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(_lowerCAmelCase ) , 8 )
]
return bytes(_lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
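# Round-trip sanity check against the standard library (illustrative):
import base64 as stdlib_base64

assert base64_encode(b"some data") == stdlib_base64.b64encode(b"some data")
assert base64_decode(base64_encode(b"some data").decode("utf-8")) == b"some data"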
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self , a , a=7 , a=3 , a=1_8 , a=3_0 , a=4_0_0 , a=True , a=None , a=True , a=None , a=True , ) -> List[str]:
lowercase__ : Tuple = size if size is not None else {'shortest_edge': 2_0}
lowercase__ : Union[str, Any] = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8}
lowercase__ : Optional[int] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : str = num_channels
lowercase__ : Any = image_size
lowercase__ : Optional[Any] = min_resolution
lowercase__ : int = max_resolution
lowercase__ : List[Any] = do_resize
lowercase__ : List[str] = size
lowercase__ : str = do_center_crop
lowercase__ : List[Any] = crop_size
lowercase__ : Union[str, Any] = do_flip_channel_order
def _UpperCAmelCase ( self ) -> int:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Optional[Any] = MobileViTImageProcessor if is_vision_available() else None
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Tuple = MobileViTImageProcessingTester(self )
@property
def _UpperCAmelCase ( self ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , 'do_resize' ) )
self.assertTrue(hasattr(a , 'size' ) )
self.assertTrue(hasattr(a , 'do_center_crop' ) )
self.assertTrue(hasattr(a , 'center_crop' ) )
self.assertTrue(hasattr(a , 'do_flip_channel_order' ) )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 2_0} )
self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8} )
lowercase__ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {'shortest_edge': 4_2} )
self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4} )
def _UpperCAmelCase ( self ) -> Tuple:
pass
def _UpperCAmelCase ( self ) -> str:
# Initialize image_processing
lowercase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : List[Any] = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _UpperCAmelCase ( self ) -> Tuple:
# Initialize image_processing
lowercase__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
lowercase__ : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : Any = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _UpperCAmelCase ( self ) -> Dict:
# Initialize image_processing
lowercase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : Tuple = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
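# Added note (mine): with the tester defaults above -- crop_size
# {"height": 18, "width": 18}, num_channels=3, and batch_size=7 -- a single
# processed image comes out as (1, 3, 18, 18) and a batch as (7, 3, 18, 18),
# which is exactly what the shape assertions verify for PIL, NumPy, and
# PyTorch inputs alike.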
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_UpperCamelCase : Optional[int] = logging.get_logger(__name__)
_UpperCamelCase : Union[str, Any] = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}
_UpperCamelCase : List[Any] = {
"vocab_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
},
"emoji_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
},
}
_UpperCamelCase : Tuple = {
"abeja/gpt-neox-japanese-2.7b": 20_48,
}
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
with open(_lowerCAmelCase , 'r' , encoding='utf-8' ) as f:
lowercase__ : Optional[Any] = json.loads(f.read() )
lowercase__ : Dict = collections.OrderedDict()
lowercase__ : Dict = collections.OrderedDict()
lowercase__ : Dict = collections.OrderedDict()
with open(_lowerCAmelCase , 'r' , encoding='utf-8' ) as f:
lowercase__ : Tuple = f.readlines()
lowercase__ : Optional[int] = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
for idx, b in enumerate(_lowerCAmelCase ):
lowercase__ : Dict = b
lowercase__ : Optional[int] = idx
for wd in b:
lowercase__ : Dict = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : List[str] = VOCAB_FILES_NAMES
lowerCamelCase__ : Any = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ : List[str] = ["input_ids", "attention_mask"]
def __init__( self , a , a , a="<|endoftext|>" , a="<|endoftext|>" , a="<|startoftext|>" , a="<|endoftext|>" , a=False , **a , ) -> Union[str, Any]:
super().__init__(
unk_token=a , pad_token=a , bos_token=a , eos_token=a , do_clean_text=a , **a , )
if not os.path.isfile(a ):
raise ValueError(
f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                ' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
if not os.path.isfile(a ):
raise ValueError(
f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
' pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
lowercase__ : Dict = do_clean_text
lowercase__ , lowercase__ , lowercase__ , lowercase__ : int = load_vocab_and_emoji(a , a )
lowercase__ : Dict = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def _UpperCAmelCase ( self ) -> Optional[Any]:
# self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
return len(self.raw_vocab )
def _UpperCAmelCase ( self ) -> Any:
return dict(self.raw_vocab , **self.added_tokens_encoder )
def _UpperCAmelCase ( self , a ) -> Union[str, Any]:
return self.subword_tokenizer.tokenize(a , clean=self.do_clean_text )
def _UpperCAmelCase ( self , a ) -> Any:
return self.vocab.get(a , self.vocab.get(self.unk_token ) )
def _UpperCAmelCase ( self , a ) -> Dict:
return self.subword_tokenizer.convert_id_to_token(a )
def _UpperCAmelCase ( self , a ) -> str:
lowercase__ : str = ''.join(a ).strip()
return out_string
def _UpperCAmelCase ( self , a ) -> List[int]:
lowercase__ : List[Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(a , add_special_tokens=a ) + [self.eos_token_id] )
if len(a ) > self.model_max_length:
lowercase__ : str = input_ids[-self.model_max_length :]
return input_ids
def _UpperCAmelCase ( self , a , a = None ) -> Tuple[str]:
lowercase__ : Optional[int] = 0
if os.path.isdir(a ):
lowercase__ : Optional[Any] = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowercase__ : List[Any] = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
else:
lowercase__ : Optional[Any] = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
)
lowercase__ : str = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
)
with open(a , 'w' , encoding='utf-8' ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
' Please check that the vocabulary is not corrupted!' )
lowercase__ : Any = token_index
writer.write(','.join(a ) + '\n' )
index += 1
with open(a , 'w' , encoding='utf-8' ) as writer:
json.dump(self.emoji , a )
return vocab_file, emoji_file
class UpperCAmelCase_ ( _a):
def __init__( self , a , a , a ) -> str:
lowercase__ : Union[str, Any] = vocab # same as swe
lowercase__ : str = ids_to_tokens # same as bpe
lowercase__ : List[str] = emoji
lowercase__ : Optional[Any] = np.max([len(a ) for w in self.vocab.keys()] )
lowercase__ : Union[str, Any] = re.compile(R'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
lowercase__ : Optional[int] = re.compile(R'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
lowercase__ : str = re.compile(R'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
lowercase__ : int = re.compile(
R'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
lowercase__ : Union[str, Any] = re.compile(
R'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
lowercase__ : List[Any] = re.compile(
R'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
lowercase__ : Optional[int] = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
lowercase__ : str = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
lowercase__ : Any = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
def __len__( self ) -> Any:
return len(self.ids_to_tokens )
def _UpperCAmelCase ( self , a ) -> Any:
lowercase__ : Dict = self.content_repattera.sub('<URL>' , a )
lowercase__ : Optional[Any] = self.content_repattera.sub('<EMAIL>' , a )
lowercase__ : Any = self.content_repattera.sub('<TEL>' , a )
lowercase__ : Any = self.content_repattera.sub('<DATE>' , a )
lowercase__ : Tuple = self.content_repattera.sub('<DATE>' , a )
lowercase__ : Optional[Any] = self.content_repattera.sub('<PRICE>' , a )
lowercase__ : Union[str, Any] = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
lowercase__ : Any = content.replace('<BLOCK><BLOCK>' , '<BLOCK>' )
return content
def _UpperCAmelCase ( self , a , a=False ) -> int:
lowercase__ : Any = text.replace(' ' , '<SP>' )
lowercase__ : int = text.replace(' ' , '<SP>' )
lowercase__ : Tuple = text.replace('\r\n' , '<BR>' )
lowercase__ : Dict = text.replace('\n' , '<BR>' )
lowercase__ : List[str] = text.replace('\r' , '<BR>' )
lowercase__ : Optional[Any] = text.replace('\t' , '<TAB>' )
lowercase__ : Dict = text.replace('—' , 'ー' )
lowercase__ : List[str] = text.replace('−' , 'ー' )
for k, v in self.emoji["emoji"].items():
if k in text:
lowercase__ : Dict = text.replace(a , a )
if clean:
lowercase__ : Optional[Any] = self.clean_text(a )
        def check_symbol(a ):
lowercase__ : int = x.encode()
            if len(x ) == 1 and len(e ) == 2:
lowercase__ : str = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0XC_2A1 and c <= 0XC_2BF)
or (c >= 0XC_780 and c <= 0XC_783)
or (c >= 0XC_AB9 and c <= 0XC_BBF)
or (c >= 0XC_C80 and c <= 0XC_DA2)
):
return True
return False
def checkuae(a ):
lowercase__ : Any = x.encode()
            if len(x ) == 1 and len(e ) == 3:
lowercase__ : Any = (int(e[0] ) << 1_6) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0XE28_080 and c <= 0XE2B_07F:
return True
return False
lowercase__ : str = 0
lowercase__ : str = []
while pos < len(a ):
lowercase__ : List[str] = min(len(a ) , pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
lowercase__ : Any = [] # (token_id, token, pos)
for e in range(a , a , -1 ):
lowercase__ : str = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(a ) > 2:
lowercase__ : Tuple = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(a ) > 0:
# the smallest token_id is adopted
lowercase__ , lowercase__ , lowercase__ : List[str] = sorted(a , key=lambda a : x[0] )[0]
result.append(a )
lowercase__ : Any = e
else:
lowercase__ : Any = pos + 1
lowercase__ : Optional[Any] = text[pos:end]
                if check_symbol(a ):
result.append('<KIGOU>' )
elif checkuae(a ):
result.append('<U2000U2BFF>' )
else:
for i in wd.encode('utf-8' ):
result.append('<|byte%d|>' % i )
lowercase__ : int = end
return result
def _UpperCAmelCase ( self , a , a="\n" ) -> Optional[int]:
lowercase__ : Optional[Any] = []
lowercase__ : Optional[int] = []
lowercase__ : List[Any] = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(a ) > 0:
words.append(bytearray(a ).decode('utf-8' , errors='replace' ) )
lowercase__ : Any = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['emoji_inv'][word] )
elif word == "<SP>":
words.append(' ' )
elif word == "<BR>":
words.append(a )
elif word == "<TAB>":
words.append('\t' )
elif word == "<BLOCK>":
words.append('▀' )
elif word == "<KIGOU>":
words.append('ǀ' )
elif word == "<U2000U2BFF>":
words.append('‖' )
else:
words.append(a )
if len(a ) > 0:
words.append(bytearray(a ).decode('utf-8' , errors='replace' ) )
lowercase__ : Union[str, Any] = ''.join(a )
return text
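# Added usage sketch (mine; `from_pretrained` is the standard `transformers`
# API and the checkpoint name comes from the pretrained-vocab map above):
#   tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#   ids = tokenizer("こんにちは、世界")["input_ids"]
#   text = tokenizer.decode(ids)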
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=True , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=4 , ) -> Dict:
lowercase__ : Optional[Any] = parent
lowercase__ : Dict = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : int = is_training
lowercase__ : str = use_attention_mask
lowercase__ : Dict = use_token_type_ids
lowercase__ : Optional[int] = use_labels
lowercase__ : Tuple = vocab_size
lowercase__ : List[str] = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : int = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : List[str] = hidden_act
lowercase__ : Dict = hidden_dropout_prob
lowercase__ : Tuple = attention_probs_dropout_prob
lowercase__ : List[str] = max_position_embeddings
lowercase__ : int = type_vocab_size
lowercase__ : List[str] = type_sequence_label_size
lowercase__ : Union[str, Any] = initializer_range
lowercase__ : Optional[int] = num_choices
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = None
if self.use_attention_mask:
lowercase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : List[str] = None
if self.use_token_type_ids:
lowercase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ : Any = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Optional[int] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs
lowercase__ : Union[str, Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Tuple = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Union[str, Any] = FlaxAlbertModelTester(self )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_class_name in self.all_model_classes:
lowercase__ : str = model_class_name.from_pretrained('albert-base-v2' )
lowercase__ : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(a )
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : str = FlaxAlbertModel.from_pretrained('albert-base-v2' )
lowercase__ : Optional[int] = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowercase__ : Optional[Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowercase__ : Any = model(a , attention_mask=a )[0]
lowercase__ : Tuple = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , a )
lowercase__ : Optional[Any] = np.array(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
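# Added sketch (mine) of the slice-fingerprint idiom used in the slow test
# above: instead of storing the full (1, 11, 768) tensor, compare a small
# fixed window of it against hard-coded values.
def _fingerprint_matches(output, expected_slice, atol=1e-4):
    return bool(np.allclose(output[:, 1:4, 1:4], expected_slice, atol=atol))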
"""simple docstring"""
from __future__ import annotations
from typing import Any
class UpperCAmelCase_ :
def __init__( self , a ) -> None:
lowercase__ : Optional[Any] = num_of_nodes
lowercase__ : list[list[int]] = []
lowercase__ : dict[int, int] = {}
def _UpperCAmelCase ( self , a , a , a ) -> None:
self.m_edges.append([u_node, v_node, weight] )
def _UpperCAmelCase ( self , a ) -> int:
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def _UpperCAmelCase ( self , a ) -> None:
if self.m_component[u_node] != u_node:
for k in self.m_component:
lowercase__ : Optional[int] = self.find_component(a )
def _UpperCAmelCase ( self , a , a , a ) -> None:
if component_size[u_node] <= component_size[v_node]:
lowercase__ : Any = v_node
component_size[v_node] += component_size[u_node]
self.set_component(a )
elif component_size[u_node] >= component_size[v_node]:
lowercase__ : Any = self.find_component(a )
component_size[u_node] += component_size[v_node]
self.set_component(a )
def _UpperCAmelCase ( self ) -> None:
lowercase__ : Optional[Any] = []
lowercase__ : Tuple = 0
lowercase__ : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
lowercase__ : Optional[int] = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
lowercase__ , lowercase__ , lowercase__ : List[Any] = edge
lowercase__ : Optional[Any] = self.m_component[u]
lowercase__ : Dict = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
lowercase__ : Dict = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(a , a ):
lowercase__ , lowercase__ , lowercase__ : List[Any] = edge
lowercase__ : Dict = self.m_component[u]
lowercase__ : Optional[Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(a , a , a )
print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
lowercase__ : List[Any] = [-1] * self.m_num_of_nodes
print(f"""The total weight of the minimal spanning tree is: {mst_weight}""" )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
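# --- Added cross-check (mine, not part of the original file) ---
# A minimal Kruskal implementation; on the same (num_nodes, edge list) input
# it should report the same total MST weight as the Boruvka routine above,
# which makes it a handy sanity check.
def _mst_weight_kruskal(num_nodes: int, edges: list[tuple[int, int, int]]) -> int:
    parent = list(range(num_nodes))

    def find(x: int) -> int:
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x

    total = 0
    for u, v, w in sorted(edges, key=lambda e: e[2]):
        ru, rv = find(u), find(v)
        if ru != rv:
            parent[ru] = rv
            total += w
    return total

assert _mst_weight_kruskal(3, [(0, 1, 5), (1, 2, 2), (0, 2, 9)]) == 7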
"""simple docstring"""
from collections.abc import Sequence
def a_ ( _lowerCAmelCase : Sequence[float] , _lowerCAmelCase : float ):
'''simple docstring'''
return sum(c * (x**i) for i, c in enumerate(_lowerCAmelCase ) )
def a_ ( _lowerCAmelCase : Sequence[float] , _lowerCAmelCase : float ):
'''simple docstring'''
lowercase__ : int = 0.0
for coeff in reversed(_lowerCAmelCase ):
lowercase__ : List[Any] = result * x + coeff
return result
if __name__ == "__main__":
_UpperCamelCase : int = (0.0, 0.0, 5.0, 9.3, 7.0)
_UpperCamelCase : Dict = 1_0.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
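    # Added worked check (mine): 5*10**2 + 9.3*10**3 + 7*10**4 = 79_800.0, so
    # direct evaluation and Horner's scheme must agree up to rounding error.
    assert abs(evaluate_poly(poly, x) - horner(poly, x)) < 1e-9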
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_UpperCamelCase : Any = {
"configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : str = [
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
_UpperCamelCase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
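# --- Added sketch (mine, not part of the original file) ---
# A simplified illustration of the lazy-import trick `_LazyModule` implements:
# the real code swaps itself into `sys.modules`, so heavy submodules are only
# imported on first attribute access.
import importlib
import types


class _LazyDemoModule(types.ModuleType):
    def __getattr__(self, name):
        submodule = importlib.import_module(f".{name}", self.__name__)
        setattr(self, name, submodule)  # cache so __getattr__ runs only once
        return submodule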
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
_UpperCamelCase : Any = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def a_ ( _lowerCAmelCase : Optional[Any]=True ):
'''simple docstring'''
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_a))
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : str = None
lowerCamelCase__ : Optional[Any] = None
def _UpperCAmelCase ( self , a , a ) -> List[Any]:
with TemporaryDirectory() as tmp_dir:
lowercase__ : List[str] = dataset_module_factory(a , cache_dir=a )
lowercase__ : List[Any] = import_main_class(dataset_module.module_path , dataset=a )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=a , config_name=a , hash=dataset_module.hash , )
lowercase__ : Union[str, Any] = '/'.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=a ).replace(os.sep , '/' ),
config.DATASET_INFO_FILENAME,
] )
lowercase__ : Union[str, Any] = cached_path(a , cache_dir=a )
self.assertTrue(os.path.exists(a ) )
@pytest.mark.integration
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : Union[str, Any] = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple'
lowercase__ : int = dataset_module_factory('wikipedia' , cache_dir=_lowerCAmelCase )
lowercase__ : Optional[int] = import_main_class(dataset_module.module_path )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=_lowerCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
lowercase__ : Optional[int] = None
builder_instance.download_and_prepare()
lowercase__ : Optional[int] = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def a_ ( _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Optional[int] = dataset_module_factory('wikipedia' , cache_dir=_lowerCAmelCase )
lowercase__ : List[str] = import_main_class(dataset_module.module_path , dataset=_lowerCAmelCase )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=_lowerCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
lowercase__ : Union[str, Any] = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert "train" in ds
assert isinstance(ds['train'] , _lowerCAmelCase )
assert next(iter(ds['train'] ) )
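# Added note (mine): the remote URL assembled in the parameterized test above
# is just three pieces joined with "/": HF_GCP_BASE_URL, the builder's
# relative data dir (with os separators normalised to "/"), and
# config.DATASET_INFO_FILENAME -- i.e. the GCS-mirrored dataset_info.json for
# that dataset/config/hash.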
"""simple docstring"""
import os
import numpy
import onnx
def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] ):
'''simple docstring'''
lowercase__ : List[str] = a.name
lowercase__ : Union[str, Any] = b.name
lowercase__ : Dict = ''
lowercase__ : Tuple = ''
lowercase__ : str = a == b
lowercase__ : List[Any] = name_a
lowercase__ : Union[str, Any] = name_b
return res
def a_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(_lowerCAmelCase , _lowerCAmelCase )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , _lowerCAmelCase , _lowerCAmelCase )
_graph_replace_input_with(node_proto.attribute[1].g , _lowerCAmelCase , _lowerCAmelCase )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , _lowerCAmelCase , _lowerCAmelCase )
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict ):
'''simple docstring'''
for n in graph_proto.node:
_node_replace_input_with(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[int] ):
'''simple docstring'''
lowercase__ : List[str] = list(model.graph.initializer )
lowercase__ : List[Any] = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
lowercase__ : Optional[Any] = inits[i].name
lowercase__ : Dict = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , _lowerCAmelCase , _lowerCAmelCase )
def a_ ( _lowerCAmelCase : List[str] ):
'''simple docstring'''
lowercase__ : Any = os.path.dirname(_lowerCAmelCase )
lowercase__ : List[str] = os.path.basename(_lowerCAmelCase )
lowercase__ : Any = onnx.load(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
lowercase__ : Optional[Any] = list(model.graph.initializer )
lowercase__ : Dict = set()
lowercase__ : Tuple = {}
lowercase__ : int = []
lowercase__ : Optional[int] = 0
for i in range(len(_lowerCAmelCase ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(_lowerCAmelCase ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(_lowerCAmelCase )
dup_set.add(_lowerCAmelCase )
lowercase__ : Optional[int] = inits[j].data_type
lowercase__ : Dict = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print('unexpected data type: ' , _lowerCAmelCase )
total_reduced_size += mem_size
lowercase__ : Tuple = inits[i].name
lowercase__ : Optional[int] = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(_lowerCAmelCase )
else:
lowercase__ : Tuple = [name_j]
ind_to_replace.append((j, i) )
print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' )
lowercase__ : int = sorted(_lowerCAmelCase )
_remove_dup_initializers_from_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
lowercase__ : Tuple = 'optimized_' + model_file_name
lowercase__ : Union[str, Any] = os.path.join(_lowerCAmelCase , _lowerCAmelCase )
onnx.save(_lowerCAmelCase , _lowerCAmelCase )
return new_model
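# Added note (mine): the dtype branches above are per-element byte sizes for
# ONNX TensorProto data types -- 1 (FLOAT) and 6 (INT32) take 4 bytes,
# 7 (INT64) and 11 (DOUBLE) take 8. A dict states the same mapping explicitly:
_ONNX_ELEM_SIZE = {1: 4, 6: 4, 7: 8, 11: 8}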
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def a_ ( _lowerCAmelCase : dict ):
'''simple docstring'''
return (data["data"], data["target"])
def a_ ( _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray ):
'''simple docstring'''
lowercase__ : Any = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(_lowerCAmelCase , _lowerCAmelCase )
# Predict target for test data
lowercase__ : str = xgb.predict(_lowerCAmelCase )
lowercase__ : Union[str, Any] = predictions.reshape(len(_lowerCAmelCase ) , 1 )
return predictions
def a_ ( ):
'''simple docstring'''
lowercase__ : Optional[Any] = fetch_california_housing()
lowercase__ , lowercase__ : str = data_handling(_lowerCAmelCase )
lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = train_test_split(
_lowerCAmelCase , _lowerCAmelCase , test_size=0.2_5 , random_state=1 )
lowercase__ : Any = xgboost(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Error printing
print(f"""Mean Absolute Error : {mean_absolute_error(_lowerCAmelCase , _lowerCAmelCase )}""" )
print(f"""Mean Square Error : {mean_squared_error(_lowerCAmelCase , _lowerCAmelCase )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
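# --- Added sketch (mine, not part of the original file) ---
# The same pipeline inlined, as a sanity check that the helpers above compose;
# every name is the real sklearn/xgboost API already imported at the top.
def _inline_pipeline() -> float:
    housing = fetch_california_housing()
    x_train, x_test, y_train, y_test = train_test_split(
        housing.data, housing.target, test_size=0.25, random_state=1
    )
    model = XGBRegressor(verbosity=0, random_state=42).fit(x_train, y_train)
    return mean_absolute_error(y_test, model.predict(x_test))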
"""simple docstring"""
from __future__ import annotations
def a_ ( _lowerCAmelCase : list[float] ):
'''simple docstring'''
lowercase__ : str = 0.0_0
lowercase__ : int = 0
for resistor in resistors:
if resistor <= 0:
lowercase__ : Optional[Any] = f"""Resistor at index {index} has a negative or zero value!"""
raise ValueError(_lowerCAmelCase )
first_sum += 1 / float(_lowerCAmelCase )
index += 1
return 1 / first_sum
def a_ ( _lowerCAmelCase : list[float] ):
'''simple docstring'''
lowercase__ : str = 0.0_0
lowercase__ : Any = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
lowercase__ : int = f"""Resistor at index {index} has a negative value!"""
raise ValueError(_lowerCAmelCase )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
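    # Added worked check (mine): 4 ohm and 12 ohm in parallel give
    # 1 / (1/4 + 1/12) = 3.0 ohm, while in series they simply add to 16.0.
    assert abs(1 / (1 / 4 + 1 / 12) - 3.0) < 1e-12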
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=1_0 , a=3 , a=2 , a=2 , a=2 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=0.9 , a=None , ) -> Optional[Any]:
lowercase__ : str = parent
lowercase__ : int = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : Dict = patch_size
lowercase__ : Tuple = tubelet_size
lowercase__ : Optional[int] = num_frames
lowercase__ : Optional[int] = is_training
lowercase__ : int = use_labels
lowercase__ : Optional[int] = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : Optional[int] = num_attention_heads
lowercase__ : Any = intermediate_size
lowercase__ : str = hidden_act
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Union[str, Any] = type_sequence_label_size
lowercase__ : List[Any] = initializer_range
lowercase__ : str = mask_ratio
lowercase__ : Optional[Any] = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
lowercase__ : Optional[Any] = (image_size // patch_size) ** 2
lowercase__ : str = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
lowercase__ : str = int(mask_ratio * self.seq_length )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : int = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowercase__ : int = None
if self.use_labels:
lowercase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Dict = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Tuple:
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]:
lowercase__ : Dict = VideoMAEModel(config=a )
model.to(a )
model.eval()
lowercase__ : Tuple = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a ) -> Union[str, Any]:
lowercase__ : str = VideoMAEForPreTraining(a )
model.to(a )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase__ : Any = torch.ones((self.num_masks,) )
lowercase__ : str = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
lowercase__ : Optional[int] = mask.expand(self.batch_size , -1 ).bool()
lowercase__ : str = model(a , a )
# model only returns predictions for masked patches
lowercase__ : str = mask.sum().item()
lowercase__ : int = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Dict = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs
lowercase__ : List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Tuple = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
lowerCamelCase__ : Optional[int] = (
{"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ : Any = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Union[str, Any] = False
lowerCamelCase__ : str = False
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Optional[Any] = VideoMAEModelTester(self )
lowercase__ : Optional[Any] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 )
def _UpperCAmelCase ( self , a , a , a=False ) -> Optional[int]:
lowercase__ : Union[str, Any] = copy.deepcopy(a )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase__ : Optional[Any] = torch.ones((self.model_tester.num_masks,) )
lowercase__ : Any = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
lowercase__ : Any = mask.expand(self.model_tester.batch_size , -1 ).bool()
lowercase__ : Union[str, Any] = bool_masked_pos.to(a )
if return_labels:
if model_class in [
*get_values(a ),
]:
lowercase__ : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a )
return inputs_dict
def _UpperCAmelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='VideoMAE does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Dict:
pass
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : int = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
lowercase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : int = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[Any] = VideoMAEModel.from_pretrained(a )
self.assertIsNotNone(a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
if not self.has_attentions:
pass
else:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : str = True
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = self.model_tester.seq_length - self.model_tester.num_masks
lowercase__ : Any = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
lowercase__ : Optional[Any] = True
lowercase__ : int = False
lowercase__ : Any = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Optional[int] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Dict = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : str = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : List[Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[Any] = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowercase__ : List[str] = len(a )
# Check attention is always last and order is fine
lowercase__ : Optional[int] = True
lowercase__ : List[str] = True
lowercase__ : int = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : List[str] = model(**self._prepare_for_class(a , a ) )
self.assertEqual(out_len + 1 , len(a ) )
lowercase__ : int = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _UpperCAmelCase ( self ) -> Optional[int]:
def check_hidden_states_output(a , a , a ):
lowercase__ : Optional[int] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[int] = outputs.hidden_states
lowercase__ : List[Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(a ) , a )
lowercase__ : Optional[Any] = self.model_tester.seq_length - self.model_tester.num_masks
lowercase__ : Union[str, Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Tuple = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Union[str, Any] = True
check_hidden_states_output(a , a , a )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
def a_ ( ):
'''simple docstring'''
lowercase__ : int = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
lowercase__ : str = np.load(_lowerCAmelCase )
return list(_lowerCAmelCase )
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def _UpperCAmelCase ( self ) -> Optional[Any]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to(
a )
lowercase__ : str = self.default_image_processor
lowercase__ : List[str] = prepare_video()
lowercase__ : int = image_processor(a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**a )
# verify the logits
lowercase__ : str = torch.Size((1, 4_0_0) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ : List[Any] = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) )
@slow
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[int] = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(a )
lowercase__ : Optional[Any] = self.default_image_processor
lowercase__ : List[str] = prepare_video()
lowercase__ : str = image_processor(a , return_tensors='pt' ).to(a )
# add boolean mask, indicating which patches to mask
lowercase__ : Union[str, Any] = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
lowercase__ : str = torch.load(a )
# forward pass
with torch.no_grad():
lowercase__ : List[Any] = model(**a )
# verify the logits
lowercase__ : Dict = torch.Size([1, 1_4_0_8, 1_5_3_6] )
lowercase__ : List[str] = torch.tensor(
[[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=a )
self.assertEqual(outputs.logits.shape , a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
lowercase__ : List[Any] = torch.tensor([0.5_142] , device=a )
self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
lowercase__ : Tuple = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=a ).to(
a )
with torch.no_grad():
lowercase__ : Any = model(**a )
        lowercase__ : List[Any] = torch.tensor([0.6_469] , device=a )
self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) )
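# Added worked check (mine) of the token bookkeeping in the tester above: with
# image_size=10, patch_size=2, num_frames=2, and tubelet_size=2 the sequence
# length is (2 // 2) * (10 // 2) ** 2 = 25 tokens, and mask_ratio=0.9 masks
# int(0.9 * 25) = 22 of them, leaving 3 visible patches per video.
assert (2 // 2) * (10 // 2) ** 2 == 25 and int(0.9 * 25) == 22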
"""simple docstring"""
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class UpperCAmelCase_ :
def __init__( self , a ) -> Optional[Any]:
lowercase__ : List[Any] = data
lowercase__ : Optional[int] = [0X67_452_301, 0XEF_CDA_B89, 0X98_BAD_CFE, 0X10_325_476, 0XC3_D2E_1F0]
@staticmethod
def _UpperCAmelCase ( a , a ) -> Tuple:
return ((n << b) | (n >> (3_2 - b))) & 0XFF_FFF_FFF
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = b'\x80' + b'\x00' * (6_3 - (len(self.data ) + 8) % 6_4)
lowercase__ : Dict = self.data + padding + struct.pack('>Q' , 8 * len(self.data ) )
return padded_data
def _UpperCAmelCase ( self ) -> List[Any]:
return [
self.padded_data[i : i + 6_4] for i in range(0 , len(self.padded_data ) , 6_4 )
]
def _UpperCAmelCase ( self , a ) -> Optional[Any]:
lowercase__ : Optional[Any] = list(struct.unpack('>16L' , a ) ) + [0] * 6_4
for i in range(1_6 , 8_0 ):
lowercase__ : Optional[Any] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 1_4] ^ w[i - 1_6]) , 1 )
return w
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : Union[str, Any] = self.padding()
lowercase__ : int = self.split_blocks()
for block in self.blocks:
lowercase__ : Tuple = self.expand_block(a )
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[Any] = self.h
for i in range(0 , 8_0 ):
if 0 <= i < 2_0:
lowercase__ : Tuple = (b & c) | ((~b) & d)
lowercase__ : Any = 0X5A_827_999
elif 2_0 <= i < 4_0:
lowercase__ : Tuple = b ^ c ^ d
lowercase__ : int = 0X6E_D9E_BA1
elif 4_0 <= i < 6_0:
lowercase__ : Any = (b & c) | (b & d) | (c & d)
lowercase__ : Optional[int] = 0X8F_1BB_CDC
elif 6_0 <= i < 8_0:
lowercase__ : Any = b ^ c ^ d
lowercase__ : List[Any] = 0XCA_62C_1D6
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = (
self.rotate(a , 5 ) + f + e + k + expanded_block[i] & 0XFF_FFF_FFF,
a,
self.rotate(a , 3_0 ),
c,
d,
)
lowercase__ : Optional[int] = (
self.h[0] + a & 0XFF_FFF_FFF,
self.h[1] + b & 0XFF_FFF_FFF,
self.h[2] + c & 0XFF_FFF_FFF,
self.h[3] + d & 0XFF_FFF_FFF,
self.h[4] + e & 0XFF_FFF_FFF,
)
return ("{:08x}" * 5).format(*self.h )
def a_ ( ):
'''simple docstring'''
lowercase__ : int = B'Test String'
    assert SHAaHash(_lowerCAmelCase ).final_hash() == hashlib.sha1(_lowerCAmelCase ).hexdigest() # noqa: S324
def a_ ( ):
'''simple docstring'''
lowercase__ : List[str] = argparse.ArgumentParser(description='Process some strings or files' )
parser.add_argument(
'--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument('--file' , dest='input_file' , help='Hash contents of a file' )
lowercase__ : List[str] = parser.parse_args()
lowercase__ : Dict = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
lowercase__ : Optional[Any] = f.read()
else:
lowercase__ : Optional[Any] = bytes(_lowerCAmelCase , 'utf-8' )
print(SHAaHash(_lowerCAmelCase ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
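    # Added worked check (mine) of the 32-bit left-rotate at the heart of
    # SHA-1: rotating 0x80000000 left by one position wraps the high bit
    # around to 1.
    assert ((0x80000000 << 1) | (0x80000000 >> 31)) & 0xFFFFFFFF == 1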
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_UpperCamelCase : Dict = logging.get_logger(__name__)
_UpperCamelCase : List[Any] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
_UpperCamelCase : List[str] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ):
'''simple docstring'''
for attribute in key.split('.' ):
lowercase__ : Dict = getattr(_lowerCAmelCase , _lowerCAmelCase )
if weight_type is not None:
lowercase__ : Optional[int] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape
else:
lowercase__ : Optional[int] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
lowercase__ : Optional[Any] = value
elif weight_type == "weight_g":
lowercase__ : Dict = value
elif weight_type == "weight_v":
lowercase__ : List[str] = value
elif weight_type == "bias":
lowercase__ : Optional[Any] = value
else:
lowercase__ : List[str] = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Tuple = []
lowercase__ : List[str] = fairseq_model.state_dict()
lowercase__ : Union[str, Any] = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
lowercase__ : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == 'group' , )
lowercase__ : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
lowercase__ : List[Any] = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
lowercase__ : int = True
if "*" in mapped_key:
lowercase__ : Optional[int] = name.split(_lowerCAmelCase )[0].split('.' )[-2]
lowercase__ : List[str] = mapped_key.replace('*' , _lowerCAmelCase )
if "weight_g" in name:
lowercase__ : List[Any] = 'weight_g'
elif "weight_v" in name:
lowercase__ : int = 'weight_v'
elif "bias" in name:
lowercase__ : Dict = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase__ : Union[str, Any] = 'weight'
else:
lowercase__ : int = None
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def a_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Dict ):
'''simple docstring'''
lowercase__ : int = full_name.split('conv_layers.' )[-1]
lowercase__ : int = name.split('.' )
lowercase__ : int = int(items[0] )
lowercase__ : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
lowercase__ : Union[str, Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
lowercase__ : Optional[int] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
lowercase__ : List[Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
lowercase__ : int = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCAmelCase )
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    '''simple docstring'''
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()
    dict_path = ""
    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_UpperCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCamelCase : str = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
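# A sketch (not part of the conversion script) of how a "*" wildcard in MAPPING is
# resolved by recursively_load_weights above; the key and template below are
# illustrative values, not real MAPPING entries:
#     name = "encoder.layers.3.self_attn.k_proj.weight"
#     key = "self_attn.k_proj"
#     layer_index = name.split(key)[0].split(".")[-2]   # -> "3"
#     mapped_key = "unispeech_sat.encoder.encoder.layers.*.attention.k_proj".replace("*", layer_index)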
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    '''simple docstring'''
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))])
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                ))
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                ))
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()
masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
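# Consuming the (filled_sentence, score, token) triples returned above -- a usage
# sketch; the completions and scores depend on the camembert-base checkpoint:
for filled_sentence, score, token in fill_mask(masked_input, model, tokenizer, topk=3):
    print(f"{score:.3f}\t{token}\t{filled_sentence}")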
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=3_2 , a=2 , a=3 , a=1_6 , a=[1, 2, 1] , a=[2, 2, 4] , a=2 , a=2.0 , a=True , a=0.0 , a=0.0 , a=0.1 , a="gelu" , a=False , a=True , a=0.02 , a=1e-5 , a=True , a=None , a=True , a=1_0 , a=8 , a=["stage1", "stage2", "stage3"] , a=[1, 2, 3] , ) -> int:
lowercase__ : int = parent
lowercase__ : Union[str, Any] = batch_size
lowercase__ : Dict = image_size
lowercase__ : str = patch_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : List[str] = embed_dim
lowercase__ : Any = depths
lowercase__ : Dict = num_heads
lowercase__ : List[str] = window_size
lowercase__ : int = mlp_ratio
lowercase__ : Tuple = qkv_bias
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Tuple = drop_path_rate
lowercase__ : List[str] = hidden_act
lowercase__ : Optional[Any] = use_absolute_embeddings
lowercase__ : Optional[Any] = patch_norm
lowercase__ : Any = layer_norm_eps
lowercase__ : List[Any] = initializer_range
lowercase__ : List[str] = is_training
lowercase__ : int = scope
lowercase__ : Optional[int] = use_labels
lowercase__ : List[Any] = type_sequence_label_size
lowercase__ : List[str] = encoder_stride
lowercase__ : Optional[Any] = out_features
lowercase__ : Dict = out_indices
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = None
if self.use_labels:
lowercase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _UpperCAmelCase ( self , a , a , a ) -> Dict:
lowercase__ : Tuple = MaskFormerSwinModel(config=a )
model.to(a )
model.eval()
lowercase__ : str = model(a )
lowercase__ : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase__ : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]:
lowercase__ : List[Any] = MaskFormerSwinBackbone(config=a )
model.to(a )
model.eval()
lowercase__ : int = model(a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [1_3, 1_6, 1_6, 1_6] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4] )
# verify ValueError
with self.parent.assertRaises(a ):
lowercase__ : Dict = ['stem']
lowercase__ : List[str] = MaskFormerSwinBackbone(config=a )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : int = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs
lowercase__ : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Optional[int] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : List[str] = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
lowerCamelCase__ : str = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : int = False
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : str = MaskFormerSwinModelTester(self )
lowercase__ : Tuple = ConfigTester(self , config_class=a , embed_dim=3_7 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
def _UpperCAmelCase ( self ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCAmelCase ( self ) -> str:
return
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a )
@unittest.skip('Swin does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
@unittest.skip('Swin does not support feedforward chunking' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(a )
lowercase__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def _UpperCAmelCase ( self ) -> int:
pass
def _UpperCAmelCase ( self , a , a , a , a ) -> Tuple:
lowercase__ : Dict = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : str = model(**self._prepare_for_class(a , a ) )
lowercase__ : List[Any] = outputs.hidden_states
lowercase__ : str = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(a ) , a )
# Swin has a different seq_length
lowercase__ : Dict = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , a )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = 3
lowercase__ : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase__ : Tuple = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase__ : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : int = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def _UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def _UpperCAmelCase ( self ) -> Any:
pass
def _UpperCAmelCase ( self ) -> Any:
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(a ):
lowercase__ : Union[str, Any] = 0
return t
def check_equivalence(a , a , a , a={} ):
with torch.no_grad():
lowercase__ : Optional[Any] = model(**a , return_dict=a , **a )
lowercase__ : Optional[int] = model(**a , return_dict=a , **a ).to_tuple()
def recursive_check(a , a ):
if isinstance(a , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(a , a ):
recursive_check(a , a )
elif isinstance(a , a ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(a , a )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(a ) , set_nan_tensor_to_zero(a ) , atol=1e-5 ) , msg=(
'Tuple and dict output are not equal. Difference:'
f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
f""" {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}. Dict has"""
f""" `nan`: {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}."""
) , )
recursive_check(a , a )
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(a )
model.to(a )
model.eval()
lowercase__ : Tuple = self._prepare_for_class(a , a )
lowercase__ : Optional[Any] = self._prepare_for_class(a , a )
check_equivalence(a , a , a )
lowercase__ : Any = self._prepare_for_class(a , a , return_labels=a )
lowercase__ : List[Any] = self._prepare_for_class(a , a , return_labels=a )
check_equivalence(a , a , a )
lowercase__ : Any = self._prepare_for_class(a , a )
lowercase__ : int = self._prepare_for_class(a , a )
check_equivalence(a , a , a , {'output_hidden_states': True} )
lowercase__ : Dict = self._prepare_for_class(a , a , return_labels=a )
lowercase__ : Optional[int] = self._prepare_for_class(a , a , return_labels=a )
check_equivalence(a , a , a , {'output_hidden_states': True} )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase , _a):
lowerCamelCase__ : Dict = (MaskFormerSwinBackbone,) if is_torch_available() else ()
lowerCamelCase__ : Optional[int] = MaskFormerSwinConfig
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Optional[int] = MaskFormerSwinModelTester(self )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : int = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
lowercase__ : Optional[Any] = backbone_class(a )
backbone.to(a )
backbone.eval()
lowercase__ : Union[str, Any] = backbone(**a )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , a )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
lowercase__ : List[str] = backbone(**a , output_hidden_states=a )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
lowercase__ , lowercase__ , lowercase__ : int = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
lowercase__ : List[Any] = backbone(**a , output_attentions=a )
self.assertIsNotNone(outputs.attentions )
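# Why create_and_check_backbone above expects channels [16, 32, 64]: Swin doubles
# the embedding width at every stage, so stage i carries embed_dim * 2**i channels
# (embed_dim=16 with three stages are the tester defaults). Standalone sketch:
if __name__ == "__main__":
    embed_dim, num_stages = 16, 3
    print([embed_dim * 2**i for i in range(num_stages)])  # [16, 32, 64]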
"""simple docstring"""
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    '''simple docstring'''
    if os.name == "nt":
        import msvcrt
        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    '''simple docstring'''
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
"""simple docstring"""
import math
def solution(n: int = 100) -> int:
    '''simple docstring'''
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
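    # Sanity check from the problem statement: for the first ten natural numbers,
    # (1 + ... + 10)**2 - (1**2 + ... + 10**2) = 3025 - 385 = 2640.
    assert solution(10) == 2640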
"""simple docstring"""
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    '''simple docstring'''
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def solution(n: int = 10) -> int:
    '''simple docstring'''
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num))
if __name__ == "__main__":
print(f'''{solution() = }''')
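    # 1406357289 is the pandigital number from the problem statement: its seven
    # three-digit substrings are divisible by 2, 3, 5, 7, 11, 13 and 17 in turn.
    assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))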
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
    def _UpperCAmelCase ( self ) -> Tuple:
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16)
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16)
        params["controlnet"] = controlnet_params
        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png")
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)
        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())
        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)
        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True, ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078])
        print(f"""output_slice: {output_slice}""")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def _UpperCAmelCase ( self ) -> List[str]:
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16)
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16)
        params["controlnet"] = controlnet_params
        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png")
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)
        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())
        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)
        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True, ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]])
        print(f"""output_slice: {output_slice}""")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
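# The replicate/shard pattern both tests rely on, in isolation: replicate() copies
# the parameters to every device, shard() splits the batch axis across devices, and
# the jitted pipeline call then runs one shard per device. A toy sketch with
# illustrative shapes (not part of the test suite; requires flax):
if __name__ == "__main__":
    toy_params = {"w": jnp.ones((4,))}
    toy_batch = jnp.zeros((jax.device_count() * 2, 4))  # batch divisible by device count
    print(replicate(toy_params)["w"].shape)             # (num_devices, 4)
    print(shard(toy_batch).shape)                       # (num_devices, 2, 4)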
"""simple docstring"""
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    '''simple docstring'''
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
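    # Worked examples: 9 = 4 + 5 is reachable, while 5 cannot be formed from
    # even values only.
    assert is_sum_subset([3, 34, 4, 12, 5, 2], 9)
    assert not is_sum_subset([2, 4, 6, 8], 5)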
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
"""simple docstring"""
import sys
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def str_eval(s: str) -> int:
    '''simple docstring'''
    product = 1
    for digit in s:
        product *= int(digit)
    return product
def solution(n: str = N) -> int:
    '''simple docstring'''
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(f'''{solution() = }''')
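    # str_eval multiplies the digits of a decimal string, e.g. 9 * 9 * 8 = 648:
    assert str_eval("998") == 648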
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
    @slow
    def _UpperCAmelCase ( self ) -> str:
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)
        from datasets import load_dataset
        dataset = load_dataset("nielsr/rvlcdip-demo")
        image = dataset["train"][0]["image"].convert("RGB")
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347], device=torch_device, dtype=torch.float, )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
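# Mapping the logits asserted above to a document class (a usage sketch that would
# run inside the test method; id2label ships with the checkpoint's config):
#     predicted_class = outputs.logits.argmax(-1).item()
#     print(model.config.id2label[predicted_class])  # one of the 16 RVL-CDIP labels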
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : torch.FloatTensor
class UpperCAmelCase_ ( nn.Module):
def __init__( self , a=3 , a=3 , a=("DownEncoderBlock2D",) , a=(6_4,) , a=2 , a=3_2 , a="silu" , a=True , ) -> str:
super().__init__()
lowercase__ : Tuple = layers_per_block
        lowercase__ : Union[str, Any] = torch.nn.Conv2d(
a , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
lowercase__ : int = None
lowercase__ : str = nn.ModuleList([] )
# down
lowercase__ : int = block_out_channels[0]
for i, down_block_type in enumerate(a ):
lowercase__ : List[Any] = output_channel
lowercase__ : Optional[int] = block_out_channels[i]
lowercase__ : Union[str, Any] = i == len(a ) - 1
lowercase__ : Tuple = get_down_block(
a , num_layers=self.layers_per_block , in_channels=a , out_channels=a , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=a , resnet_groups=a , attention_head_dim=a , temb_channels=a , )
self.down_blocks.append(a )
# mid
        lowercase__ : int = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=a , output_scale_factor=1 , resnet_time_scale_shift='default' , attention_head_dim=block_out_channels[-1] , resnet_groups=a , temb_channels=a , )
# out
lowercase__ : Dict = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=a , eps=1e-6 )
lowercase__ : Union[str, Any] = nn.SiLU()
lowercase__ : Dict = 2 * out_channels if double_z else out_channels
        lowercase__ : List[Any] = nn.Conv2d(block_out_channels[-1] , a , 3 , padding=1 )
lowercase__ : Dict = False
def _UpperCAmelCase ( self , a ) -> Optional[int]:
lowercase__ : Dict = x
lowercase__ : Optional[Any] = self.conv_in(a )
if self.training and self.gradient_checkpointing:
def create_custom_forward(a ):
def custom_forward(*a ):
return module(*a )
return custom_forward
# down
if is_torch_version('>=' , '1.11.0' ):
for down_block in self.down_blocks:
lowercase__ : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(a ) , a , use_reentrant=a )
# middle
lowercase__ : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , a , use_reentrant=a )
else:
for down_block in self.down_blocks:
lowercase__ : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(a ) , a )
# middle
lowercase__ : Union[str, Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , a )
else:
# down
for down_block in self.down_blocks:
lowercase__ : int = down_block(a )
# middle
lowercase__ : Optional[int] = self.mid_block(a )
# post-process
lowercase__ : List[Any] = self.conv_norm_out(a )
lowercase__ : Dict = self.conv_act(a )
lowercase__ : Optional[Any] = self.conv_out(a )
return sample
class UpperCAmelCase_ ( nn.Module):
def __init__( self , a=3 , a=3 , a=("UpDecoderBlock2D",) , a=(6_4,) , a=2 , a=3_2 , a="silu" , a="group" , ) -> Dict:
super().__init__()
lowercase__ : List[Any] = layers_per_block
        lowercase__ : Optional[int] = nn.Conv2d(
a , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
lowercase__ : Tuple = None
lowercase__ : Dict = nn.ModuleList([] )
lowercase__ : int = in_channels if norm_type == 'spatial' else None
# mid
        lowercase__ : List[Any] = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=a , output_scale_factor=1 , resnet_time_scale_shift='default' if norm_type == 'group' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=a , temb_channels=a , )
# up
lowercase__ : Optional[Any] = list(reversed(a ) )
lowercase__ : Dict = reversed_block_out_channels[0]
for i, up_block_type in enumerate(a ):
lowercase__ : str = output_channel
lowercase__ : str = reversed_block_out_channels[i]
lowercase__ : Tuple = i == len(a ) - 1
lowercase__ : List[Any] = get_up_block(
a , num_layers=self.layers_per_block + 1 , in_channels=a , out_channels=a , prev_output_channel=a , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=a , resnet_groups=a , attention_head_dim=a , temb_channels=a , resnet_time_scale_shift=a , )
self.up_blocks.append(a )
lowercase__ : Optional[Any] = output_channel
# out
if norm_type == "spatial":
lowercase__ : Optional[Any] = SpatialNorm(block_out_channels[0] , a )
else:
lowercase__ : Union[str, Any] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=a , eps=1e-6 )
lowercase__ : Union[str, Any] = nn.SiLU()
        lowercase__ : Any = nn.Conv2d(block_out_channels[0] , a , 3 , padding=1 )
lowercase__ : str = False
def _UpperCAmelCase ( self , a , a=None ) -> Dict:
lowercase__ : List[str] = z
lowercase__ : List[Any] = self.conv_in(a )
lowercase__ : Dict = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(a ):
def custom_forward(*a ):
return module(*a )
return custom_forward
if is_torch_version('>=' , '1.11.0' ):
# middle
lowercase__ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , a , a , use_reentrant=a )
lowercase__ : List[Any] = sample.to(a )
# up
for up_block in self.up_blocks:
lowercase__ : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(a ) , a , a , use_reentrant=a )
else:
# middle
lowercase__ : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , a , a )
lowercase__ : Optional[Any] = sample.to(a )
# up
for up_block in self.up_blocks:
lowercase__ : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(a ) , a , a )
else:
# middle
lowercase__ : Any = self.mid_block(a , a )
lowercase__ : List[Any] = sample.to(a )
# up
for up_block in self.up_blocks:
lowercase__ : Any = up_block(a , a )
# post-process
if latent_embeds is None:
lowercase__ : int = self.conv_norm_out(a )
else:
lowercase__ : Union[str, Any] = self.conv_norm_out(a , a )
lowercase__ : Union[str, Any] = self.conv_act(a )
lowercase__ : Optional[int] = self.conv_out(a )
return sample
class UpperCAmelCase_ ( nn.Module):
def __init__( self , a , a , a , a=None , a="random" , a=False , a=True ) -> Optional[Any]:
super().__init__()
lowercase__ : int = n_e
lowercase__ : str = vq_embed_dim
lowercase__ : Tuple = beta
lowercase__ : List[Any] = legacy
lowercase__ : Tuple = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
lowercase__ : Optional[int] = remap
if self.remap is not None:
self.register_buffer('used' , torch.tensor(np.load(self.remap ) ) )
lowercase__ : Optional[int] = self.used.shape[0]
lowercase__ : List[Any] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
lowercase__ : Any = self.re_embed
lowercase__ : List[Any] = self.re_embed + 1
print(
f"""Remapping {self.n_e} indices to {self.re_embed} indices. """
f"""Using {self.unknown_index} for unknown indices.""" )
else:
lowercase__ : str = n_e
lowercase__ : str = sane_index_shape
def _UpperCAmelCase ( self , a ) -> List[Any]:
lowercase__ : Dict = inds.shape
assert len(a ) > 1
lowercase__ : List[Any] = inds.reshape(ishape[0] , -1 )
lowercase__ : Tuple = self.used.to(a )
lowercase__ : str = (inds[:, :, None] == used[None, None, ...]).long()
lowercase__ : Dict = match.argmax(-1 )
lowercase__ : int = match.sum(2 ) < 1
if self.unknown_index == "random":
lowercase__ : List[Any] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
lowercase__ : Optional[Any] = self.unknown_index
return new.reshape(a )
def _UpperCAmelCase ( self , a ) -> Tuple:
lowercase__ : List[str] = inds.shape
assert len(a ) > 1
lowercase__ : int = inds.reshape(ishape[0] , -1 )
lowercase__ : str = self.used.to(a )
if self.re_embed > self.used.shape[0]: # extra token
lowercase__ : Dict = 0 # simply set to zero
lowercase__ : Any = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , a )
return back.reshape(a )
def _UpperCAmelCase ( self , a ) -> List[str]:
# reshape z -> (batch, height, width, channel) and flatten
lowercase__ : str = z.permute(0 , 2 , 3 , 1 ).contiguous()
lowercase__ : List[str] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
lowercase__ : str = torch.argmin(torch.cdist(a , self.embedding.weight ) , dim=1 )
lowercase__ : str = self.embedding(a ).view(z.shape )
lowercase__ : Union[str, Any] = None
lowercase__ : Tuple = None
# compute loss for embedding
if not self.legacy:
lowercase__ : int = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
lowercase__ : str = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
lowercase__ : List[str] = z + (z_q - z).detach()
# reshape back to match original input shape
lowercase__ : List[str] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
lowercase__ : Optional[int] = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
lowercase__ : Optional[int] = self.remap_to_used(a )
lowercase__ : Union[str, Any] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
lowercase__ : Any = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def _UpperCAmelCase ( self , a , a ) -> Any:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
lowercase__ : Optional[int] = indices.reshape(shape[0] , -1 ) # add batch axis
lowercase__ : Any = self.unmap_to_all(a )
lowercase__ : Dict = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
lowercase__ : Optional[Any] = self.embedding(a )
if shape is not None:
lowercase__ : Dict = z_q.view(a )
# reshape back to match original input shape
lowercase__ : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class UpperCAmelCase_ ( _a):
def __init__( self , a , a=False ) -> Any:
lowercase__ : Optional[Any] = parameters
lowercase__ , lowercase__ : List[str] = torch.chunk(a , 2 , dim=1 )
lowercase__ : Any = torch.clamp(self.logvar , -30.0 , 20.0 )
lowercase__ : Dict = deterministic
lowercase__ : List[str] = torch.exp(0.5 * self.logvar )
lowercase__ : Optional[int] = torch.exp(self.logvar )
if self.deterministic:
lowercase__ : int = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def _UpperCAmelCase ( self , a = None ) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
lowercase__ : List[str] = randn_tensor(
self.mean.shape , generator=a , device=self.parameters.device , dtype=self.parameters.dtype )
lowercase__ : Dict = self.mean + self.std * sample
return x
def _UpperCAmelCase ( self , a=None ) -> Union[str, Any]:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def _UpperCAmelCase ( self , a , a=[1, 2, 3] ) -> str:
if self.deterministic:
return torch.Tensor([0.0] )
lowercase__ : Optional[Any] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=a )
def _UpperCAmelCase ( self ) -> int:
return self.mean
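# The heart of VectorQuantizer.forward above: nearest-codebook lookup with
# torch.cdist + argmin, followed by a straight-through estimator so gradients
# still reach z. Standalone sketch with a tiny random codebook:
if __name__ == "__main__":
    codebook = torch.randn(8, 4)                  # n_e=8 entries of dim vq_embed_dim=4
    z = torch.randn(10, 4, requires_grad=True)    # flattened latents
    min_idx = torch.argmin(torch.cdist(z, codebook), dim=1)
    z_q = codebook[min_idx]
    z_q = z + (z_q - z).detach()                  # forward value is z_q, gradient is identity in z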
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase_ :
@staticmethod
def _UpperCAmelCase ( *a , **a ) -> int:
pass
def hashimage(image: Image):
    '''simple docstring'''
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ : Union[str, Any] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def _UpperCAmelCase ( self , a , a , a ) -> Dict:
lowercase__ : Union[str, Any] = DepthEstimationPipeline(model=a , image_processor=a )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _UpperCAmelCase ( self , a , a ) -> Optional[int]:
lowercase__ : Tuple = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , a )
import datasets
lowercase__ : Tuple = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
lowercase__ : List[Any] = depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
] , a , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@slow
@require_torch
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Tuple = 'Intel/dpt-large'
lowercase__ : Optional[int] = pipeline('depth-estimation' , model=a )
lowercase__ : List[Any] = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
lowercase__ : Optional[Any] = hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )
@require_torch
def _UpperCAmelCase ( self ) -> Optional[int]:
# This is highly irregular to have no small tests.
self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
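# Minimal real-world counterpart of the slow test above (a usage sketch; it
# downloads Intel/dpt-large, so network access is required):
if __name__ == "__main__":
    estimator = pipeline("depth-estimation", model="Intel/dpt-large")
    result = estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    result["depth"].save("depth.png")  # PIL image; result["predicted_depth"] is the raw tensor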
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
"tokenization_perceiver": ["PerceiverTokenizer"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Any = ["PerceiverFeatureExtractor"]
_UpperCamelCase : str = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
"PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PerceiverForImageClassificationConvProcessing",
"PerceiverForImageClassificationFourier",
"PerceiverForImageClassificationLearned",
"PerceiverForMaskedLM",
"PerceiverForMultimodalAutoencoding",
"PerceiverForOpticalFlow",
"PerceiverForSequenceClassification",
"PerceiverLayer",
"PerceiverModel",
"PerceiverPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
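# The _LazyModule indirection above keeps "import transformers.models.perceiver"
# cheap: the heavy torch/vision submodules are imported only when one of the
# exported names is first accessed, e.g. (illustrative):
#     from transformers.models.perceiver import PerceiverModel  # triggers the modeling import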
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class UpperCAmelCase_ ( _a):
def __init__( self ) -> Any:
lowercase__ : Tuple = []
def _UpperCAmelCase ( self , a , a , a , **a ) -> Any:
self.events.append('on_init_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Optional[int]:
self.events.append('on_train_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> List[str]:
self.events.append('on_train_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_epoch_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Optional[Any]:
self.events.append('on_epoch_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_step_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> str:
self.events.append('on_step_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_evaluate' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Tuple:
self.events.append('on_predict' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Union[str, Any]:
self.events.append('on_save' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> List[str]:
self.events.append('on_log' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Any:
self.events.append('on_prediction_step' )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> str:
lowercase__ : str = tempfile.mkdtemp()
def _UpperCAmelCase ( self ) -> Dict:
shutil.rmtree(self.output_dir )
def _UpperCAmelCase ( self , a=0 , a=0 , a=6_4 , a=6_4 , a=None , a=False , **a ) -> int:
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# its set to False since the tests later on depend on its value.
lowercase__ : str = RegressionDataset(length=a )
lowercase__ : Any = RegressionDataset(length=a )
lowercase__ : Optional[Any] = RegressionModelConfig(a=a , b=a )
lowercase__ : Union[str, Any] = RegressionPreTrainedModel(a )
lowercase__ : Tuple = TrainingArguments(self.output_dir , disable_tqdm=a , report_to=[] , **a )
return Trainer(
a , a , train_dataset=a , eval_dataset=a , callbacks=a , )
    def check_callbacks_equality(self, cbs1, cbs2) -> None:
        self.assertEqual(len(cbs1), len(cbs2))
        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
def _UpperCAmelCase ( self , a ) -> Optional[Any]:
lowercase__ : Dict = ['on_init_end', 'on_train_begin']
lowercase__ : List[Any] = 0
lowercase__ : Optional[int] = len(trainer.get_eval_dataloader() )
lowercase__ : Tuple = ['on_prediction_step'] * len(trainer.get_eval_dataloader() ) + ['on_log', 'on_evaluate']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('on_epoch_begin' )
for _ in range(a ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('on_log' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('on_save' )
expected_events.append('on_epoch_end' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : int = self.get_trainer()
lowercase__ : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# Callbacks passed at init are added to the default callbacks
lowercase__ : str = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
lowercase__ : List[Any] = self.get_trainer(disable_tqdm=a )
lowercase__ : Optional[Any] = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : int = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
lowercase__ : List[str] = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(a )
expected_callbacks.remove(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
lowercase__ : Optional[Any] = self.get_trainer()
lowercase__ : List[Any] = trainer.pop_callback(a )
self.assertEqual(cb.__class__ , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
trainer.add_callback(a )
expected_callbacks.insert(0 , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# We can also add, pop, or remove by instance
lowercase__ : int = self.get_trainer()
lowercase__ : List[str] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(a )
expected_callbacks.remove(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
lowercase__ : Tuple = self.get_trainer()
lowercase__ : Dict = trainer.callback_handler.callbacks[0]
lowercase__ : Union[str, Any] = trainer.pop_callback(a )
self.assertEqual(a , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
trainer.add_callback(a )
expected_callbacks.insert(0 , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
def _UpperCAmelCase ( self ) -> Tuple:
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action='ignore' , category=a )
lowercase__ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
lowercase__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# Independent log/save/eval
lowercase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
lowercase__ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
lowercase__ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='steps' )
trainer.train()
lowercase__ : Optional[int] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='epoch' )
trainer.train()
lowercase__ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# A bit of everything
lowercase__ : Any = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=1_0 , eval_steps=5 , evaluation_strategy='steps' , )
trainer.train()
lowercase__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# warning should be emitted for duplicated callbacks
with patch('transformers.trainer_callback.logger.warning' ) as warn_mock:
lowercase__ : str = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(a ) in warn_mock.call_args[0][0]
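# A minimal custom callback of the kind exercised above; hooks receive the
# TrainingArguments, TrainerState and TrainerControl objects and are registered
# via Trainer(callbacks=[...]). A sketch, not one of the callbacks under test:
class PrintEpochCallback(TrainerCallback):
    def on_epoch_end(self, args, state, control, **kwargs):
        print(f"finished epoch {state.epoch}")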
"""simple docstring"""
class SubArray:
    def __init__(self, arr) -> None:
        # we need a list not a string, so do something to change the type
        self.array = arr.split(",")
    def solve_sub_array(self) -> int:
        sum_value = [int(self.array[0])] * len(self.array)
        rear = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(
                int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]
if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Dict = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
_UpperCamelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 645
| 1
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCamelCase : Tuple = {
"configuration_xmod": [
"XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XmodConfig",
"XmodOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Optional[Any] = [
"XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
"XmodForCausalLM",
"XmodForMaskedLM",
"XmodForMultipleChoice",
"XmodForQuestionAnswering",
"XmodForSequenceClassification",
"XmodForTokenClassification",
"XmodModel",
"XmodPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
_UpperCamelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 645
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self , a ) -> str:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
lowercase__ : str = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(a )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = 'sshleifer/tiny-gpt2'
lowercase__ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , )
lowercase__ : str = TensorFlowBenchmark(a )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : List[str] = 'sgugger/tiny-distilbert-classification'
lowercase__ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , only_pretrain_model=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Optional[int] = 'sshleifer/tiny-gpt2'
lowercase__ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
lowercase__ : List[Any] = AutoConfig.from_pretrained(a )
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , )
lowercase__ : Tuple = TensorFlowBenchmark(a , [config] )
lowercase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : List[str] = AutoConfig.from_pretrained(a )
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : List[str] = TensorFlowBenchmark(a , [config] )
lowercase__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : Optional[int] = AutoConfig.from_pretrained(a )
lowercase__ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : str = TensorFlowBenchmark(a , [config] )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[str] = 'patrickvonplaten/t5-tiny-random'
lowercase__ : Any = AutoConfig.from_pretrained(a )
lowercase__ : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : int = TensorFlowBenchmark(a , configs=[config] )
lowercase__ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a , multi_process=a , )
lowercase__ : Any = TensorFlowBenchmark(a )
lowercase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a , save_to_csv=a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(a , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(a , 'env.csv' ) , multi_process=a , )
lowercase__ : Union[str, Any] = TensorFlowBenchmark(a )
benchmark.run()
self.assertTrue(Path(os.path.join(a , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(a , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(a , 'env.csv' ) ).exists() )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Tuple = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(a ):
self.assertTrue(hasattr(a , 'sequential' ) )
self.assertTrue(hasattr(a , 'cumulative' ) )
self.assertTrue(hasattr(a , 'current' ) )
self.assertTrue(hasattr(a , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a , 'log.txt' ) , log_print=a , trace_memory_line_by_line=a , eager_mode=a , multi_process=a , )
lowercase__ : Optional[int] = TensorFlowBenchmark(a )
lowercase__ : Optional[Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(a , 'log.txt' ) ).exists() )
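# A minimal standalone sketch of the benchmark flow exercised by the tests
# above, using the same public API (TensorFlowBenchmarkArguments /
# TensorFlowBenchmark). The argument values mirror the tests and are otherwise
# illustrative; these benchmark utilities are deprecated in recent transformers
# releases, so treat this as a sketch rather than a recipe.
def run_tiny_gpt2_inference_benchmark():
    from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

    args = TensorFlowBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"],
        inference=True,
        training=False,
        sequence_lengths=[8],
        batch_sizes=[1],
        multi_process=False,
    )
    # run() returns an object whose time/memory result dicts the tests inspect
    return TensorFlowBenchmark(args).run()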
| 645
| 1
|
"""simple docstring"""
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
if bit_count < 0:
raise ValueError('The given input must be non-negative' )
# get the generated string sequence
lowercase__ : Any = gray_code_sequence_string(_lowerCAmelCase )
# convert the binary strings to integers
for i in range(len(_lowerCAmelCase ) ):
lowercase__ : List[Any] = int(sequence[i] , 2 )
return sequence
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
lowercase__ : List[str] = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
lowercase__ : List[str] = gray_code_sequence_string(bit_count - 1 )
lowercase__ : Optional[int] = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
lowercase__ : int = '0' + smaller_sequence[i]
sequence.append(_lowerCAmelCase )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
lowercase__ : List[str] = '1' + smaller_sequence[i]
sequence.append(_lowerCAmelCase )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
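# For intuition: with 2 bits the reflected sequence built above is
# "00", "01", "11", "10", i.e. [0, 1, 3, 2]. A minimal non-recursive sketch
# using the closed form g(i) = i XOR (i >> 1), which yields the same
# reflected-Gray-code ordering (names are illustrative):
def gray_code_xor(bit_count: int) -> list[int]:
    return [i ^ (i >> 1) for i in range(1 << bit_count)]


assert gray_code_xor(2) == [0, 1, 3, 2]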
| 645
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class UpperCAmelCase_ ( _a):
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=False , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Any:
lowercase__ : Tuple = parent
lowercase__ : List[Any] = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : List[Any] = is_training
lowercase__ : Optional[Any] = use_input_mask
lowercase__ : Optional[int] = use_token_type_ids
lowercase__ : int = use_labels
lowercase__ : Tuple = vocab_size
lowercase__ : int = hidden_size
lowercase__ : Any = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : Optional[Any] = intermediate_size
lowercase__ : Optional[Any] = hidden_act
lowercase__ : List[str] = hidden_dropout_prob
lowercase__ : List[Any] = attention_probs_dropout_prob
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : List[str] = type_vocab_size
lowercase__ : Tuple = type_sequence_label_size
lowercase__ : List[Any] = initializer_range
lowercase__ : str = num_labels
lowercase__ : Tuple = num_choices
lowercase__ : str = scope
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = None
if self.use_input_mask:
lowercase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Dict = None
lowercase__ : Optional[Any] = None
lowercase__ : int = None
if self.use_labels:
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : List[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self ) -> Optional[int]:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict:
lowercase__ : Tuple = DistilBertModel(config=a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , a )
lowercase__ : str = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict:
lowercase__ : Optional[int] = DistilBertForMaskedLM(config=a )
model.to(a )
model.eval()
lowercase__ : Union[str, Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> int:
lowercase__ : Tuple = DistilBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
lowercase__ : Tuple = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> List[str]:
lowercase__ : int = self.num_labels
lowercase__ : Dict = DistilBertForSequenceClassification(a )
model.to(a )
model.eval()
lowercase__ : Optional[Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Any:
lowercase__ : Any = self.num_labels
lowercase__ : List[str] = DistilBertForTokenClassification(config=a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Tuple:
lowercase__ : List[Any] = self.num_choices
lowercase__ : Any = DistilBertForMultipleChoice(config=a )
model.to(a )
model.eval()
lowercase__ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : int = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Union[str, Any] = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__)) : List[str] = config_and_inputs
lowercase__ : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : List[str] = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__ : str = (
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : Any = True
lowerCamelCase__ : List[Any] = True
lowerCamelCase__ : Optional[Any] = True
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : str = DistilBertModelTester(self )
lowercase__ : int = ConfigTester(self , config_class=a , dim=3_7 )
def _UpperCAmelCase ( self ) -> Dict:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = DistilBertModel.from_pretrained(a )
self.assertIsNotNone(a )
@slow
@require_torch_gpu
def _UpperCAmelCase ( self ) -> Any:
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# DistilBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
lowercase__ : Optional[int] = True
lowercase__ : Union[str, Any] = model_class(config=a )
lowercase__ : int = self._prepare_for_class(a , a )
lowercase__ : Tuple = torch.jit.trace(
a , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , 'traced_model.pt' ) )
lowercase__ : Optional[int] = torch.jit.load(os.path.join(a , 'traced_model.pt' ) , map_location=a )
loaded(inputs_dict['input_ids'].to(a ) , inputs_dict['attention_mask'].to(a ) )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : int = DistilBertModel.from_pretrained('distilbert-base-uncased' )
lowercase__ : Union[str, Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowercase__ : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase__ : Optional[Any] = model(a , attention_mask=a )[0]
lowercase__ : Tuple = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , a )
lowercase__ : List[Any] = torch.tensor(
[[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
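# The GPU test above traces the model with TorchScript and reloads it; here is
# a minimal sketch of that trace/save/load round trip. The model, example
# inputs, and directory are illustrative placeholders supplied by the caller.
def torchscript_round_trip(model, example_inputs, tmp_dir):
    import os

    import torch

    model.eval()
    # tracing records the operations executed for these concrete example inputs
    traced = torch.jit.trace(model, example_inputs)
    path = os.path.join(tmp_dir, "traced_model.pt")
    torch.jit.save(traced, path)
    # the reloaded module runs without the original Python class definition
    loaded = torch.jit.load(path, map_location="cpu")
    return loaded(*example_inputs)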
| 645
| 1
|
"""simple docstring"""
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self , a , a=2 , a=5_6 , a=True , a=True , a=True , a=True , a=9_9 , a=3_2 , a=2 , a=2 , a=7 , a="gelu_new" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=4 , a="block_sparse" , a=True , a=False , a=2 , a=3 , ) -> int:
lowercase__ : Tuple = parent
lowercase__ : Tuple = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : Optional[Any] = is_training
lowercase__ : str = use_attention_mask
lowercase__ : Union[str, Any] = use_token_type_ids
lowercase__ : Dict = use_labels
lowercase__ : List[Any] = vocab_size
lowercase__ : List[str] = hidden_size
lowercase__ : Dict = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : Optional[Any] = intermediate_size
lowercase__ : List[str] = hidden_act
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Tuple = max_position_embeddings
lowercase__ : List[Any] = type_vocab_size
lowercase__ : Tuple = type_sequence_label_size
lowercase__ : Dict = initializer_range
lowercase__ : Optional[int] = num_choices
lowercase__ : List[Any] = rescale_embeddings
lowercase__ : Any = attention_type
lowercase__ : Optional[int] = use_bias
lowercase__ : Optional[Any] = block_size
lowercase__ : Any = num_random_blocks
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : List[str] = None
if self.use_attention_mask:
lowercase__ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Union[str, Any] = None
if self.use_token_type_ids:
lowercase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ : List[Any] = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = config_and_inputs
lowercase__ : Optional[Any] = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'attention_mask': attention_mask,
}
return config, inputs_dict
@require_flax
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Any = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
lowerCamelCase__ : Union[str, Any] = False
lowerCamelCase__ : Dict = False
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Dict = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _UpperCAmelCase ( self ) -> List[str]:
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _UpperCAmelCase ( self ) -> Optional[int]:
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _UpperCAmelCase ( self ) -> List[Any]:
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _UpperCAmelCase ( self ) -> str:
super().test_hidden_states_output()
@slow
def _UpperCAmelCase ( self ) -> List[str]:
for model_class_name in self.all_model_classes:
lowercase__ : int = model_class_name.from_pretrained('google/bigbird-roberta-base' )
self.assertIsNotNone(a )
def _UpperCAmelCase ( self ) -> Any:
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _UpperCAmelCase ( self ) -> str:
lowercase__ , lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowercase__ : Optional[int] = self._prepare_for_class(a , a )
lowercase__ : List[str] = model_class(a )
@jax.jit
def model_jitted(a , a=None , **a ):
return model(input_ids=a , attention_mask=a , **a )
with self.subTest('JIT Enabled' ):
lowercase__ : Optional[Any] = model_jitted(**a ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
lowercase__ : List[str] = model_jitted(**a ).to_tuple()
self.assertEqual(len(a ) , len(a ) )
for jitted_output, output in zip(a , a ):
self.assertEqual(jitted_output.shape , output.shape )
def _UpperCAmelCase ( self , a , a , a , a=1e-5 , a="outputs" , a=None ) -> Optional[Any]:
# `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in the PyTorch version
# an effort was made to return `attention_probs` (yet to be verified).
if name.startswith('outputs.attentions' ):
return
else:
super().check_pt_flax_outputs(a , a , a , a , a , a )
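# The jitted-vs-eager check above compiles the forward pass with jax.jit and
# asserts both paths agree; a minimal standalone sketch of the same pattern on
# a toy function (names and values are illustrative):
import jax
import jax.numpy as jnp


@jax.jit
def scaled_sum(x):
    # any pure function of arrays can be compiled this way
    return jnp.sum(x * 2.0)


x = jnp.arange(4.0)
with jax.disable_jit():
    eager_result = scaled_sum(x)
assert scaled_sum(x) == eager_result  # 12.0 either way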
| 645
|
"""simple docstring"""
from __future__ import annotations
def a_ ( _lowerCAmelCase : float , _lowerCAmelCase : float , _lowerCAmelCase : float , ):
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You must supply exactly two values; set the unknown one to zero' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
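# A standalone numeric check of the relation solved above,
# stress = tangential_force / area (names here are illustrative, not the
# snippet's obfuscated ones): a 100 N tangential force over 20 m^2 gives a
# shear stress of 5 Pa.
def shear_stress(tangential_force: float, area: float) -> float:
    return tangential_force / area


assert shear_stress(100.0, 20.0) == 5.0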
| 645
| 1
|
"""simple docstring"""
def a_ ( ):
'''simple docstring'''
lowercase__ : Dict = 0
for i in range(1 , 1001 ):
total += i**i
return str(_lowerCAmelCase )[-10:]
if __name__ == "__main__":
print(solution())
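# The loop above materialises every i**i in full before truncating; a
# memory-friendlier sketch keeps only the last ten digits throughout, via
# three-argument pow (modular exponentiation). Names are illustrative.
def last_ten_digits_of_self_powers(limit: int = 1000) -> str:
    modulus = 10**10
    total = sum(pow(i, i, modulus) for i in range(1, limit + 1)) % modulus
    return str(total).zfill(10)  # zfill guards against leading zeros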
| 645
|
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=[3_0, 3_0] , a=2 , a=3 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=3 , a=None , a=8 , a=1_0 , ) -> Any:
lowercase__ : List[str] = parent
lowercase__ : Optional[Any] = batch_size
lowercase__ : Optional[int] = image_size
lowercase__ : List[Any] = patch_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : str = is_training
lowercase__ : Optional[Any] = use_labels
lowercase__ : Optional[Any] = hidden_size
lowercase__ : Dict = num_hidden_layers
lowercase__ : Optional[Any] = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : List[Any] = hidden_act
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : Any = attention_probs_dropout_prob
lowercase__ : Any = type_sequence_label_size
lowercase__ : Dict = initializer_range
lowercase__ : Union[str, Any] = num_labels
lowercase__ : Tuple = scope
lowercase__ : Tuple = n_targets
lowercase__ : Optional[int] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
lowercase__ : Optional[Any] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
lowercase__ : Tuple = num_patches + 1 + self.num_detection_tokens
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowercase__ : Tuple = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowercase__ : int = []
for i in range(self.batch_size ):
lowercase__ : Optional[Any] = {}
lowercase__ : Any = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=a )
lowercase__ : List[str] = torch.rand(self.n_targets , 4 , device=a )
labels.append(a )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> List[Any]:
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def _UpperCAmelCase ( self , a , a , a ) -> int:
lowercase__ : List[str] = YolosModel(config=a )
model.to(a )
model.eval()
lowercase__ : List[Any] = model(a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a ) -> Union[str, Any]:
lowercase__ : str = YolosForObjectDetection(a )
model.to(a )
model.eval()
lowercase__ : Dict = model(pixel_values=a )
lowercase__ : Tuple = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
lowercase__ : str = model(pixel_values=a , labels=a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : int = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Any = config_and_inputs
lowercase__ : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Optional[int] = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCamelCase__ : List[str] = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
lowerCamelCase__ : List[Any] = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : Union[str, Any] = False
def _UpperCAmelCase ( self , a , a , a=False ) -> Dict:
lowercase__ : List[str] = super()._prepare_for_class(a , a , return_labels=a )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
lowercase__ : Optional[Any] = []
for i in range(self.model_tester.batch_size ):
lowercase__ : Dict = {}
lowercase__ : Dict = torch.ones(
size=(self.model_tester.n_targets,) , device=a , dtype=torch.long )
lowercase__ : Optional[Any] = torch.ones(
self.model_tester.n_targets , 4 , device=a , dtype=torch.float )
labels.append(a )
lowercase__ : Union[str, Any] = labels
return inputs_dict
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Dict = YolosModelTester(self )
lowercase__ : Optional[int] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 )
def _UpperCAmelCase ( self ) -> str:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
# YOLOS does not use inputs_embeds
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : int = model_class(a )
lowercase__ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Tuple = [*signature.parameters.keys()]
lowercase__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Dict = True
# in YOLOS, the seq_len is different
lowercase__ : Tuple = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = True
lowercase__ : str = False
lowercase__ : str = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Any = model(**self._prepare_for_class(a , a ) )
lowercase__ : str = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also works using the config
del inputs_dict["output_attentions"]
lowercase__ : Optional[int] = True
lowercase__ : List[Any] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : List[str] = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowercase__ : Dict = len(a )
# Check attention is always last and order is fine
lowercase__ : Any = True
lowercase__ : int = True
lowercase__ : int = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Any = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[Any] = 1
self.assertEqual(out_len + added_hidden_states , len(a ) )
lowercase__ : Tuple = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _UpperCAmelCase ( self ) -> List[str]:
def check_hidden_states_output(a , a , a ):
lowercase__ : str = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : int = model(**self._prepare_for_class(a , a ) )
lowercase__ : int = outputs.hidden_states
lowercase__ : Any = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(a ) , a )
# YOLOS has a different seq_length
lowercase__ : Optional[int] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Any = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also works using the config
del inputs_dict["output_hidden_states"]
lowercase__ : List[Any] = True
check_hidden_states_output(a , a , a )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*a )
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : int = YolosModel.from_pretrained(a )
self.assertIsNotNone(a )
def a_ ( ):
'''simple docstring'''
lowercase__ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return AutoImageProcessor.from_pretrained('hustvl/yolos-small' ) if is_vision_available() else None
@slow
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = YolosForObjectDetection.from_pretrained('hustvl/yolos-small' ).to(a )
lowercase__ : Tuple = self.default_image_processor
lowercase__ : Optional[int] = prepare_img()
lowercase__ : int = image_processor(images=a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : int = model(inputs.pixel_values )
# verify outputs
lowercase__ : Tuple = torch.Size((1, 1_0_0, 9_2) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ : Any = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] , device=a , )
lowercase__ : List[str] = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] , device=a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , a , atol=1e-4 ) )
# verify postprocessing
lowercase__ : Optional[Any] = image_processor.post_process_object_detection(
a , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
lowercase__ : str = torch.tensor([0.9_994, 0.9_790, 0.9_964, 0.9_972, 0.9_861] ).to(a )
lowercase__ : Any = [7_5, 7_5, 1_7, 6_3, 1_7]
lowercase__ : Optional[int] = torch.tensor([335.0_609, 79.3_848, 375.4_216, 187.2_495] ).to(a )
self.assertEqual(len(results['scores'] ) , 5 )
self.assertTrue(torch.allclose(results['scores'] , a , atol=1e-4 ) )
self.assertSequenceEqual(results['labels'].tolist() , a )
self.assertTrue(torch.allclose(results['boxes'][0, :] , a ) )
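# A minimal sketch of the end-to-end detection flow the integration test above
# verifies, using the same checkpoint and threshold; the function name and the
# CPU-only setup are illustrative.
def detect_objects(image):
    import torch
    from transformers import AutoImageProcessor, YolosForObjectDetection

    processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
    model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # converts raw logits/boxes into per-image scores, labels and absolute boxes
    return processor.post_process_object_detection(
        outputs, threshold=0.3, target_sizes=[image.size[::-1]]
    )[0]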
| 645
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_UpperCamelCase : int = {"tokenization_bertweet": ["BertweetTokenizer"]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
_UpperCamelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
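# The _LazyModule indirection above recurs throughout the library; a minimal
# sketch of the idea (illustrative, not transformers' actual implementation):
# defer the heavy submodule imports until an exported name is first accessed.
import importlib
import types


class MinimalLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value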
| 645
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
_UpperCamelCase : int = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase_ :
def __init__( self , a=False , a=False , a=6.0 , a=None , a=False , a=False , a=None , a="fp4" , a=False , **a , ) -> Tuple:
lowercase__ : str = load_in_abit
lowercase__ : str = load_in_abit
lowercase__ : List[str] = llm_inta_threshold
lowercase__ : Dict = llm_inta_skip_modules
lowercase__ : Tuple = llm_inta_enable_fpaa_cpu_offload
lowercase__ : Any = llm_inta_has_fpaa_weight
lowercase__ : Any = bnb_abit_quant_type
lowercase__ : Dict = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
lowercase__ : Dict = torch.floataa
elif isinstance(a , a ):
lowercase__ : Any = getattr(a , a )
elif isinstance(a , torch.dtype ):
lowercase__ : Any = bnb_abit_compute_dtype
else:
raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype' )
self.post_init()
def _UpperCAmelCase ( self ) -> str:
if not isinstance(self.llm_inta_threshold , a ):
raise ValueError('llm_int8_threshold must be a float' )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , a ):
raise ValueError('llm_int8_skip_modules must be a list of strings' )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , a ):
raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean' )
if not isinstance(self.llm_inta_has_fpaa_weight , a ):
raise ValueError('llm_int8_has_fp16_weight must be a boolean' )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError('bnb_4bit_compute_dtype must be torch.dtype' )
if not isinstance(self.bnb_abit_quant_type , a ):
raise ValueError('bnb_4bit_quant_type must be a string' )
if not isinstance(self.bnb_abit_use_double_quant , a ):
raise ValueError('bnb_4bit_use_double_quant must be a boolean' )
if self.load_in_abit and not version.parse(importlib.metadata.version('bitsandbytes' ) ) >= version.parse(
'0.39.0' ):
raise ValueError(
'4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version' )
def _UpperCAmelCase ( self ) -> Tuple:
return self.load_in_abit or self.load_in_abit
def _UpperCAmelCase ( self ) -> List[str]:
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def _UpperCAmelCase ( cls , a , a , **a ) -> Optional[Any]:
lowercase__ : List[Any] = cls(**a )
lowercase__ : Union[str, Any] = []
for key, value in kwargs.items():
if hasattr(a , a ):
setattr(a , a , a )
to_remove.append(a )
for key in to_remove:
kwargs.pop(a , a )
if return_unused_kwargs:
return config, kwargs
else:
return config
def _UpperCAmelCase ( self , a ) -> Dict:
with open(a , 'w' , encoding='utf-8' ) as writer:
lowercase__ : Any = self.to_dict()
lowercase__ : str = json.dumps(a , indent=2 , sort_keys=a ) + '\n'
writer.write(a )
def _UpperCAmelCase ( self ) -> Dict[str, Any]:
lowercase__ : Optional[Any] = copy.deepcopy(self.__dict__ )
lowercase__ : Any = str(output['bnb_4bit_compute_dtype'] ).split('.' )[1]
return output
def __repr__( self ) -> Dict:
return f"""{self.__class__.__name__} {self.to_json_string()}"""
def _UpperCAmelCase ( self , a = True ) -> str:
if use_diff is True:
lowercase__ : List[Any] = self.to_diff_dict()
else:
lowercase__ : List[str] = self.to_dict()
return json.dumps(a , indent=2 , sort_keys=a ) + "\n"
def _UpperCAmelCase ( self ) -> Dict[str, Any]:
lowercase__ : Tuple = self.to_dict()
# get the default config dict
lowercase__ : Optional[Any] = BitsAndBytesConfig().to_dict()
lowercase__ : int = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
lowercase__ : Optional[int] = value
return serializable_config_dict
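# Under its original (de-obfuscated) names this class is transformers'
# BitsAndBytesConfig; a typical 4-bit usage sketch follows. It assumes a CUDA
# machine with bitsandbytes>=0.39.0 installed, and the model id is illustrative.
def load_4bit_model(model_id="facebook/opt-350m"):
    import torch
    from transformers import AutoModelForCausalLM, BitsAndBytesConfig

    quant_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_use_double_quant=True,
        bnb_4bit_compute_dtype=torch.float16,
    )
    # the config is serialised alongside the model on save_pretrained()
    return AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quant_config)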
| 645
| 1
|
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def a_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Optional[int]="pt" ):
'''simple docstring'''
lowercase__ : Dict = {'add_prefix_space': True} if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and not line.startswith(' ' ) else {}
lowercase__ : Any = padding_side
return tokenizer(
[line] , max_length=_lowerCAmelCase , padding='max_length' if pad_to_max_length else None , truncation=_lowerCAmelCase , return_tensors=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , **_lowerCAmelCase , )
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any=None , ):
'''simple docstring'''
lowercase__ : int = input_ids.ne(_lowerCAmelCase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class UpperCAmelCase_ ( _a):
def __init__( self , a , a , a , a , a="train" , a=None , a=None , a=None , a="" , ) -> List[Any]:
super().__init__()
lowercase__ : Dict = Path(a ).joinpath(type_path + '.source' )
lowercase__ : Union[str, Any] = Path(a ).joinpath(type_path + '.target' )
lowercase__ : Any = self.get_char_lens(self.src_file )
lowercase__ : Union[str, Any] = max_source_length
lowercase__ : Tuple = max_target_length
assert min(self.src_lens ) > 0, f"""found empty line in {self.src_file}"""
lowercase__ : Optional[int] = tokenizer
lowercase__ : Optional[Any] = prefix
if n_obs is not None:
lowercase__ : Union[str, Any] = self.src_lens[:n_obs]
lowercase__ : Dict = src_lang
lowercase__ : Optional[Any] = tgt_lang
def __len__( self ) -> Any:
return len(self.src_lens )
def __getitem__( self , a ) -> Dict[str, torch.Tensor]:
lowercase__ : Dict = index + 1 # linecache starts at 1
lowercase__ : List[Any] = self.prefix + linecache.getline(str(self.src_file ) , a ).rstrip('\n' )
lowercase__ : Optional[Any] = linecache.getline(str(self.tgt_file ) , a ).rstrip('\n' )
assert source_line, f"""empty source line for index {index}"""
assert tgt_line, f"""empty tgt line for index {index}"""
# Need to add eos token manually for T5
if isinstance(self.tokenizer , a ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
lowercase__ : str = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , a ) else self.tokenizer
)
lowercase__ : List[Any] = self.tokenizer.generator if isinstance(self.tokenizer , a ) else self.tokenizer
lowercase__ : Optional[Any] = encode_line(a , a , self.max_source_length , 'right' )
lowercase__ : Dict = encode_line(a , a , self.max_target_length , 'right' )
lowercase__ : str = source_inputs['input_ids'].squeeze()
lowercase__ : Any = target_inputs['input_ids'].squeeze()
lowercase__ : Optional[int] = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _UpperCAmelCase ( a ) -> int:
return [len(a ) for x in Path(a ).open().readlines()]
def _UpperCAmelCase ( self , a ) -> Dict[str, torch.Tensor]:
lowercase__ : Optional[int] = torch.stack([x['input_ids'] for x in batch] )
lowercase__ : List[Any] = torch.stack([x['attention_mask'] for x in batch] )
lowercase__ : str = torch.stack([x['decoder_input_ids'] for x in batch] )
lowercase__ : Dict = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , a )
else self.tokenizer.pad_token_id
)
lowercase__ : Union[str, Any] = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , a )
else self.tokenizer.pad_token_id
)
lowercase__ : int = trim_batch(a , a )
lowercase__ , lowercase__ : List[Any] = trim_batch(a , a , attention_mask=a )
lowercase__ : Optional[int] = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
_UpperCamelCase : Optional[int] = getLogger(__name__)
def a_ ( _lowerCAmelCase : List[List] ):
'''simple docstring'''
return list(itertools.chain.from_iterable(_lowerCAmelCase ) )
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : str = get_git_info()
save_json(_lowerCAmelCase , os.path.join(_lowerCAmelCase , 'git_log.json' ) )
def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int=4 , **_lowerCAmelCase : Any ):
'''simple docstring'''
with open(_lowerCAmelCase , 'w' ) as f:
json.dump(_lowerCAmelCase , _lowerCAmelCase , indent=_lowerCAmelCase , **_lowerCAmelCase )
def a_ ( _lowerCAmelCase : Tuple ):
'''simple docstring'''
with open(_lowerCAmelCase ) as f:
return json.load(_lowerCAmelCase )
def a_ ( ):
'''simple docstring'''
lowercase__ : Optional[int] = git.Repo(search_parent_directories=_lowerCAmelCase )
lowercase__ : str = {
'repo_id': str(_lowerCAmelCase ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
'hostname': str(socket.gethostname() ),
}
return repo_infos
def a_ ( _lowerCAmelCase : Callable , _lowerCAmelCase : Iterable ):
'''simple docstring'''
return list(map(_lowerCAmelCase , _lowerCAmelCase ) )
def a_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int ):
'''simple docstring'''
with open(_lowerCAmelCase , 'wb' ) as f:
return pickle.dump(_lowerCAmelCase , _lowerCAmelCase )
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
def remove_articles(_lowerCAmelCase : Optional[int] ):
return re.sub(R'\b(a|an|the)\b' , ' ' , _lowerCAmelCase )
def white_space_fix(_lowerCAmelCase : Union[str, Any] ):
return " ".join(text.split() )
def remove_punc(_lowerCAmelCase : Optional[int] ):
lowercase__ : Dict = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_lowerCAmelCase : str ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_lowerCAmelCase ) ) ) )
def a_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : List[str] = normalize_answer(_lowerCAmelCase ).split()
lowercase__ : Dict = normalize_answer(_lowerCAmelCase ).split()
lowercase__ : Any = Counter(_lowerCAmelCase ) & Counter(_lowerCAmelCase )
lowercase__ : Optional[int] = sum(common.values() )
if num_same == 0:
return 0
lowercase__ : List[str] = 1.0 * num_same / len(_lowerCAmelCase )
lowercase__ : Optional[int] = 1.0 * num_same / len(_lowerCAmelCase )
lowercase__ : Optional[int] = (2 * precision * recall) / (precision + recall)
return fa
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
return normalize_answer(_lowerCAmelCase ) == normalize_answer(_lowerCAmelCase )
def a_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : List[str] ):
'''simple docstring'''
assert len(_lowerCAmelCase ) == len(_lowerCAmelCase )
lowercase__ : Optional[int] = 0
for hypo, pred in zip(_lowerCAmelCase , _lowerCAmelCase ):
em += exact_match_score(_lowerCAmelCase , _lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
em /= len(_lowerCAmelCase )
return {"em": em}
def a_ ( _lowerCAmelCase : Optional[int] ):
'''simple docstring'''
return model_prefix.startswith('rag' )
def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Any = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
lowercase__ : Dict = 'dropout_rate'
for p in extra_params:
if getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
if not hasattr(_lowerCAmelCase , _lowerCAmelCase ) and not hasattr(_lowerCAmelCase , equivalent_param[p] ):
logger.info('config doesn\'t have a `{}` attribute'.format(_lowerCAmelCase ) )
delattr(_lowerCAmelCase , _lowerCAmelCase )
continue
lowercase__ : Union[str, Any] = p if hasattr(_lowerCAmelCase , _lowerCAmelCase ) else equivalent_param[p]
setattr(_lowerCAmelCase , _lowerCAmelCase , getattr(_lowerCAmelCase , _lowerCAmelCase ) )
delattr(_lowerCAmelCase , _lowerCAmelCase )
return hparams, config
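# A readable standalone sketch of the token-overlap F1 computed above (same
# lower / strip-punctuation / strip-articles normalisation and Counter
# intersection, with illustrative names rather than the module's obfuscated
# ones):
import re
import string
from collections import Counter


def token_f1(prediction: str, reference: str) -> float:
    def normalize(text: str) -> str:
        text = text.lower()
        text = "".join(ch for ch in text if ch not in set(string.punctuation))
        text = re.sub(r"\b(a|an|the)\b", " ", text)
        return " ".join(text.split())

    pred_tokens = normalize(prediction).split()
    ref_tokens = normalize(reference).split()
    common = Counter(pred_tokens) & Counter(ref_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_tokens)
    recall = num_same / len(ref_tokens)
    return 2 * precision * recall / (precision + recall)


assert abs(token_f1("the cat sat", "a cat sat down") - 0.8) < 1e-9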
| 645
|
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_UpperCamelCase : int = 16
_UpperCamelCase : Union[str, Any] = 32
def a_ ( _lowerCAmelCase : Tuple ):
'''simple docstring'''
return int(x / 2**20 )
class UpperCAmelCase_ :
def __enter__( self ) -> Union[str, Any]:
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
lowercase__ : List[str] = torch.cuda.memory_allocated()
return self
def __exit__( self , *a ) -> Any:
gc.collect()
torch.cuda.empty_cache()
lowercase__ : Optional[Any] = torch.cuda.memory_allocated()
lowercase__ : Union[str, Any] = torch.cuda.max_memory_allocated()
lowercase__ : List[Any] = bamb(self.end - self.begin )
lowercase__ : List[Any] = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def a_ ( _lowerCAmelCase : Accelerator , _lowerCAmelCase : int = 16 , _lowerCAmelCase : str = "bert-base-cased" , _lowerCAmelCase : int = 320 , _lowerCAmelCase : int = 160 , ):
'''simple docstring'''
lowercase__ : List[Any] = AutoTokenizer.from_pretrained(_lowerCAmelCase )
lowercase__ : Union[str, Any] = load_dataset(
'glue' , 'mrpc' , split={'train': f"""train[:{n_train}]""", 'validation': f"""validation[:{n_val}]"""} )
def tokenize_function(_lowerCAmelCase : int ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : List[str] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase__ : Union[str, Any] = datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_lowerCAmelCase )
# We also rename the 'label' column to 'labels', which is the column name the
# models of the transformers library expect
lowercase__ : Union[str, Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_lowerCAmelCase : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowerCAmelCase , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(_lowerCAmelCase , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
lowercase__ : Dict = DataLoader(
tokenized_datasets['train'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
lowercase__ : Dict = DataLoader(
tokenized_datasets['validation'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
return train_dataloader, eval_dataloader
def training_function(config, args):
    '''Train on MRPC while recording per-epoch peak GPU memory.'''
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    model_name_or_path = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin)))
        accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used))
        accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked))
        accelerator.print(
            'Total Peak Memory consumed during the train (max): {}'.format(
                tracemalloc.peaked + bamb(tracemalloc.begin)))
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, 'peak_memory_utilization.json'), 'w') as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.')
    parser.add_argument(
        '--model_name_or_path', type=str, default='bert-base-cased', help='Path to pretrained model or model identifier from huggingface.co/models.', required=False, )
    parser.add_argument(
        '--output_dir', type=str, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.', )
    parser.add_argument(
        '--peak_memory_upper_bound', type=float, default=None, help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.', )
    parser.add_argument(
        '--n_train', type=int, default=320, help='Number of training examples to use.', )
    parser.add_argument(
        '--n_val', type=int, default=160, help='Number of validation examples to use.', )
    parser.add_argument(
        '--num_epochs', type=int, default=1, help='Number of train epochs.', )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
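# Launch sketch (my assumptions: the file is saved as `peak_memory_usage.py`
# and the `accelerate` CLI is installed and configured):
#
#     accelerate launch peak_memory_usage.py --model_name_or_path bert-base-cased \
#         --num_epochs 1 --peak_memory_upper_bound 4000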
| 645
| 1
|
"""simple docstring"""
from __future__ import annotations
def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ['src', 'dst', 'weight'])
        if distance[u] != float('inf') and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    '''Return shortest distances from `src` to every vertex, raising if a
    negative-weight cycle is reachable.'''
    distance = [float('inf')] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ['src', 'dst', 'weight'])
            if distance[u] != float('inf') and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception('Negative cycle found')

    return distance
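# Worked example (mine, not from the original module): a 3-vertex graph with
# edges 0->1 (w=2) and 1->2 (w=3) yields distances [0.0, 2.0, 5.0] from 0:
#
#     >>> bellman_ford([{"src": 0, "dst": 1, "weight": 2},
#     ...               {"src": 1, "dst": 2, "weight": 3}], 3, 2, 0)
#     [0.0, 2.0, 5.0]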
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 645
|
"""simple docstring"""
def prefix_function(input_string: str) -> list:
    '''KMP prefix function: for each position, the length of the longest proper
    prefix of the string that is also a suffix ending there.'''
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str) -> int:
    '''Length of the longest reoccurring prefix (maximum of the prefix function).'''
    return max(prefix_function(input_str))
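# Worked example (mine): for "aabaaab" the prefix function is
# [0, 1, 0, 1, 2, 2, 3], so the longest reoccurring prefix has length 3:
#
#     >>> prefix_function("aabaaab")
#     [0, 1, 0, 1, 2, 2, 3]
#     >>> longest_prefix("aabaaab")
#     3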
if __name__ == "__main__":
import doctest
doctest.testmod()
| 645
| 1
|
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module):
    '''Disable gradients for every parameter of `module`.'''
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    '''Prefer CUDA, then MPS (with a caveat), then CPU.'''
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = 'mps'
    if device == "mps":
        print(
            'WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'
            ' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'
            ' with generations.' )
    return device


def show_pil(img):
    '''Display a PIL image with matplotlib, hiding both axes.'''
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    '''Current wall-clock time as an HH:MM:SS string.'''
    current_time = datetime.now()
    timestamp = current_time.strftime('%H:%M:%S')
    return timestamp
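# How the helpers compose (my sketch; `model` is a hypothetical torch.nn.Module):
#
#     device = get_device()
#     freeze_module(model)
#     print(f"[{get_timestamp()}] model frozen, running on {device}")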
| 645
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_flip_channel_order=True, ):
        size = size if size is not None else {'shortest_edge': 20}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_center_crop'))
        self.assertTrue(hasattr(image_processing, 'center_crop'))
        self.assertTrue(hasattr(image_processing, 'do_flip_channel_order'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 20})
        self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'shortest_edge': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )
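# Run sketch (the test-file path is my assumption about a transformers source
# checkout, not stated in this file):
#
#     pytest tests/models/mobilevit/test_image_processing_mobilevit.py -k "test_call"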
| 645
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCamelCase : str = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : List[Any] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : List[Any] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
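# Effect of the lazy-module registration above (a sketch of the intended
# behaviour, not extra functionality): importing the package is cheap, and a
# heavy submodule is only imported when one of its attributes is first touched:
#
#     from transformers.models.xglm import XGLMConfig  # pulls in configuration_xglm only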
| 645
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('albert-base-v2')
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained('albert-base-v2')
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 645
| 1
|
"""simple docstring"""
def hubble_parameter(hubble_constant: float, radiation_density: float, matter_density: float, dark_energy: float, redshift: float, ) -> float:
    '''Hubble parameter H(z) from the Friedmann equation, given the present-day
    density parameters and a redshift.'''
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError('All input parameters must be positive')
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError('Relative densities cannot be greater than one')
    curvature = 1 - (matter_density + radiation_density + dark_energy)
    e_2 = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    hubble = hubble_constant * e_2 ** (1 / 2)
    return hubble
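# Sanity check (mine): at redshift 0 with the densities summing to 1 the
# curvature term vanishes, E(0) = 1, and H(0) is just the Hubble constant:
#
#     hubble_parameter(hubble_constant=68.3, radiation_density=1e-4,
#                      matter_density=0.3, dark_energy=0.6999, redshift=0)
#     # ~68.3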
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
| 645
|
"""simple docstring"""
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    '''Evaluate poly(x) directly from the coefficient sequence (lowest degree first).'''
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    '''Evaluate poly(x) with Horner's method: n multiplications, no exponentiation.'''
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
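# Worked example (mine): poly = (1, 2, 3) encodes 1 + 2x + 3x^2; at x = 2 both
# routes give 1 + 4 + 12 = 17, but horner avoids computing powers:
#
#     >>> evaluate_poly((1, 2, 3), 2.0)
#     17.0
#     >>> horner((1, 2, 3), 2.0)
#     17.0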
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)  # coefficients of x^0 .. x^4
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 645
| 1
|
"""simple docstring"""
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    '''Entry point of the transformers-cli: build the parser, dispatch to the
    selected subcommand, and run it.'''
    parser = ArgumentParser('Transformers CLI tool', usage='transformers-cli <command> [<args>]')
    commands_parser = parser.add_subparsers(help='transformers-cli command helpers')

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
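# Dispatch sketch (subcommand names follow the registered command classes
# above; `env` and `download` are subcommands I know exist):
#
#     transformers-cli env
#     transformers-cli download bert-base-cased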
| 645
|
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
    '''Build `parameterized` named test cases from DATASETS_ON_HF_GCP.'''
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir, config_name=config_name, hash=dataset_module.hash, )
            dataset_info_url = '/'.join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, '/'),
                    config.DATASET_INFO_FILENAME,
                ] )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp('test_hf_gcp') / 'test_wikipedia_simple'
    dataset_module = dataset_module_factory('wikipedia', cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir, config_name='20220301.frr', hash=dataset_module.hash, )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    dataset_module = dataset_module_factory('wikipedia', cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path, config_name='20220301.frr', hash=dataset_module.hash, )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds['train'], IterableDataset)
    assert next(iter(ds['train']))
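# Run sketch (the file path and marker invocation are my assumptions about a
# datasets source checkout, not stated in this file):
#
#     pytest tests/test_hf_gcp.py -m integration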
| 645
| 1
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."}, )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization. "
                "Don't set if you want to train an encoder model from scratch."
            )
        }, )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization. "
                "Don't set if you want to train a decoder model from scratch."
            )
        }, )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"})
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"})


def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path, decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path, encoder_config=encoder_config, decoder_config=decoder_config, )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)

    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
if __name__ == "__main__":
main()
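# Invocation sketch (the script filename and the ViT/GPT-2 pairing are my
# assumptions; any compatible encoder/decoder checkpoints work):
#
#     python create_model_from_encoder_decoder_models.py \
#         --output_dir ./vit-gpt2 \
#         --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#         --decoder_model_name_or_path gpt2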
| 645
|
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    '''Split a fetched sklearn bunch into (features, target).'''
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    '''Fit an XGBoost regressor and return column-vector predictions.'''
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main():
    '''Fetch the California housing dataset, train, and report test error.'''
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
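# Shape note (mine): with the 25% test split above, `xgboost(...)` returns a
# column vector aligned with `y_test`:
#
#     preds = xgboost(x_train, y_train, x_test)  # shape (len(x_test), 1)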
| 645
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(self, vocab_size=100000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, hidden_act="silu", max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, use_memory_efficient_attention=True, hidden_dropout_prob=0.1, attention_dropout_prob=0.1, use_stable_embedding=True, shared_input_output_embedding=True, rope_scaling=None, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # the misspelled kwarg is popped for backward compatibility
        self.use_memory_efficient_attention = kwargs.pop(
            'use_memorry_efficient_attention', use_memory_efficient_attention )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                f"got {self.rope_scaling}" )
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" )
| 645
|
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(self, parent, batch_size=13, image_size=10, num_channels=3, patch_size=2, tubelet_size=2, num_frames=2, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, mask_ratio=0.9, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame

        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, tubelet_size=self.tubelet_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()

        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)

        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='VideoMAE does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                self.assertListEqual(
                    list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass
def prepare_video():
    '''Load the sample spaghetti-eating clip used by the integration tests.'''
    file = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video', filename='eating_spaghetti.npy', repo_type='dataset')
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics').to(
            torch_device)

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_for_pretraining(self):
        model = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short').to(torch_device)

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors='pt').to(torch_device)

        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos', filename='bool_masked_pos.pt')
        inputs["bool_masked_pos"] = torch.load(local_path)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device)
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short', norm_pix_loss=False).to(
            torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_loss = torch.tensor([0.6469], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
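# Masking convention used throughout this file (my restatement of the code
# above): one shared boolean mask with `num_masks` True entries, repeated
# across the batch so every video masks the same number of patches:
#
#     mask = torch.cat([torch.ones(num_masks), torch.zeros(seq_length - num_masks)])
#     bool_masked_pos = mask.expand(batch_size, -1).bool()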
| 645
| 1
|
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
        'en': 'Machine learning is great, isn\'t it?',
        'ru': 'Машинное обучение - это здорово, не так ли?',
        'de': 'Maschinelles Lernen ist großartig, nicht wahr?',
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        'wmt16-en-de-dist-12-1': [28.3, 27.52],
        'wmt16-en-de-dist-6-1': [27.4, 27.11],
        'wmt16-en-de-12-1': [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"""
lowercase__ : List[Any] = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"allenai/{model_name}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
"""
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, 'README.md')
    print(f"Generating {path}")
    with open(path, 'w', encoding='utf-8') as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 645
|
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
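# Example of how MAPPING is applied (my illustration of the loading logic
# below): the fairseq key "encoder.layers.3.self_attn.k_proj.weight" matches
# the "self_attn.k_proj" entry, the "*" is filled with layer index "3", and the
# tensor lands on "unispeech_sat.encoder.layers.3.attention.k_proj" with
# weight_type "weight".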
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}" )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split('.')[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def a_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Dict ):
'''simple docstring'''
lowercase__ : int = full_name.split('conv_layers.' )[-1]
lowercase__ : int = name.split('.' )
lowercase__ : int = int(items[0] )
lowercase__ : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
lowercase__ : Union[str, Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
lowercase__ : Optional[int] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
lowercase__ : List[Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
lowercase__ : int = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCAmelCase )
@torch.no_grad()
def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : str=None , _lowerCAmelCase : Tuple=True ):
'''simple docstring'''
if config_path is not None:
lowercase__ : Any = UniSpeechSatConfig.from_pretrained(_lowerCAmelCase )
else:
lowercase__ : Any = UniSpeechSatConfig()
lowercase__ : Union[str, Any] = ''
if is_finetuned:
lowercase__ : Optional[Any] = UniSpeechSatForCTC(_lowerCAmelCase )
else:
lowercase__ : List[Any] = UniSpeechSatForPreTraining(_lowerCAmelCase )
lowercase__ , lowercase__ , lowercase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
lowercase__ : Union[str, Any] = model[0].eval()
recursively_load_weights(_lowerCAmelCase , _lowerCAmelCase )
hf_wavavec.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_UpperCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCamelCase : str = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
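# Example invocation (the script and file names below are placeholders chosen for
# illustration, not paths from the original repository):
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path /path/to/unispeech_sat.pt \
#       --dict_path /path/to/dict.txt \
#       --pytorch_dump_folder_path ./unispeech-sat-hf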
| 645
| 1
|
"""simple docstring"""
import os
import sys
import unittest
_UpperCamelCase : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
_UpperCamelCase : int = os.path.join(git_repo_path, "src", "transformers")
_UpperCamelCase : Union[str, Any] = "\n{0} = None\n"
_UpperCamelCase : Any = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n"
_UpperCamelCase : str = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Any = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")' )
self.assertIsNone(a )
lowercase__ : Dict = find_backend(' if not is_tokenizers_available():' )
self.assertEqual(a , 'tokenizers' )
lowercase__ : Optional[Any] = find_backend(' if not is_tensorflow_text_available():' )
self.assertEqual(a , 'tensorflow_text' )
lowercase__ : List[str] = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):' )
self.assertEqual(a , 'sentencepiece_and_tokenizers' )
lowercase__ : Dict = find_backend(
' if not (is_sentencepiece_available() and is_tensorflow_text_available()):' )
self.assertEqual(a , 'sentencepiece_and_tensorflow_text' )
lowercase__ : int = find_backend(
' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):' )
self.assertEqual(a , 'sentencepiece_and_tokenizers_and_vision' )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Union[str, Any] = read_init()
# We don't assert on the exact list of keys to allow for the smooth growth of backend-specific objects
self.assertIn('torch' , a )
self.assertIn('tensorflow_text' , a )
self.assertIn('sentencepiece_and_tokenizers' , a )
# Likewise, we can't assert on the exact content of a key
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertModel' , objects['tf'] )
self.assertIn('FlaxBertModel' , objects['flax'] )
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertTokenizer' , objects['tensorflow_text'] )
self.assertIn('convert_slow_tokenizer' , objects['sentencepiece_and_tokenizers'] )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : List[Any] = create_dummy_object('CONSTANT' , '\'torch\'' )
self.assertEqual(a , '\nCONSTANT = None\n' )
lowercase__ : List[Any] = create_dummy_object('function' , '\'torch\'' )
self.assertEqual(
a , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )
lowercase__ : int = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n'
lowercase__ : List[str] = create_dummy_object('FakeClass' , '\'torch\'' )
self.assertEqual(a , a )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : List[Any] = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n'
lowercase__ : List[str] = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
self.assertEqual(dummy_files['torch'] , a )
| 645
|
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=3_2 , a=2 , a=3 , a=1_6 , a=[1, 2, 1] , a=[2, 2, 4] , a=2 , a=2.0 , a=True , a=0.0 , a=0.0 , a=0.1 , a="gelu" , a=False , a=True , a=0.02 , a=1e-5 , a=True , a=None , a=True , a=1_0 , a=8 , a=["stage1", "stage2", "stage3"] , a=[1, 2, 3] , ) -> int:
lowercase__ : int = parent
lowercase__ : Union[str, Any] = batch_size
lowercase__ : Dict = image_size
lowercase__ : str = patch_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : List[str] = embed_dim
lowercase__ : Any = depths
lowercase__ : Dict = num_heads
lowercase__ : List[str] = window_size
lowercase__ : int = mlp_ratio
lowercase__ : Tuple = qkv_bias
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Tuple = drop_path_rate
lowercase__ : List[str] = hidden_act
lowercase__ : Optional[Any] = use_absolute_embeddings
lowercase__ : Optional[Any] = patch_norm
lowercase__ : Any = layer_norm_eps
lowercase__ : List[Any] = initializer_range
lowercase__ : List[str] = is_training
lowercase__ : int = scope
lowercase__ : Optional[int] = use_labels
lowercase__ : List[Any] = type_sequence_label_size
lowercase__ : List[str] = encoder_stride
lowercase__ : Optional[Any] = out_features
lowercase__ : Dict = out_indices
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = None
if self.use_labels:
lowercase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _UpperCAmelCase ( self , a , a , a ) -> Dict:
lowercase__ : Tuple = MaskFormerSwinModel(config=a )
model.to(a )
model.eval()
lowercase__ : str = model(a )
lowercase__ : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase__ : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]:
lowercase__ : List[Any] = MaskFormerSwinBackbone(config=a )
model.to(a )
model.eval()
lowercase__ : int = model(a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [1_3, 1_6, 1_6, 1_6] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4] )
# verify ValueError
with self.parent.assertRaises(a ):
lowercase__ : Dict = ['stem']
lowercase__ : List[str] = MaskFormerSwinBackbone(config=a )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : int = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs
lowercase__ : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Optional[int] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : List[str] = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
lowerCamelCase__ : str = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : int = False
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : str = MaskFormerSwinModelTester(self )
lowercase__ : Tuple = ConfigTester(self , config_class=a , embed_dim=3_7 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
def _UpperCAmelCase ( self ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCAmelCase ( self ) -> str:
return
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a )
@unittest.skip('Swin does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
@unittest.skip('Swin does not support feedforward chunking' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(a )
lowercase__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def _UpperCAmelCase ( self ) -> int:
pass
def _UpperCAmelCase ( self , a , a , a , a ) -> Tuple:
lowercase__ : Dict = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : str = model(**self._prepare_for_class(a , a ) )
lowercase__ : List[Any] = outputs.hidden_states
lowercase__ : str = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(a ) , a )
# Swin has a different seq_length
lowercase__ : Dict = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , a )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = 3
lowercase__ : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase__ : Tuple = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase__ : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : int = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def _UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def _UpperCAmelCase ( self ) -> Any:
pass
def _UpperCAmelCase ( self ) -> Any:
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(a ):
lowercase__ : Union[str, Any] = 0
return t
def check_equivalence(a , a , a , a={} ):
with torch.no_grad():
lowercase__ : Optional[Any] = model(**a , return_dict=a , **a )
lowercase__ : Optional[int] = model(**a , return_dict=a , **a ).to_tuple()
def recursive_check(a , a ):
if isinstance(a , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(a , a ):
recursive_check(a , a )
elif isinstance(a , a ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(a , a )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(a ) , set_nan_tensor_to_zero(a ) , atol=1e-5 ) , msg=(
'Tuple and dict output are not equal. Difference:'
f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
f""" {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}. Dict has"""
f""" `nan`: {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}."""
) , )
recursive_check(a , a )
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(a )
model.to(a )
model.eval()
lowercase__ : Tuple = self._prepare_for_class(a , a )
lowercase__ : Optional[Any] = self._prepare_for_class(a , a )
check_equivalence(a , a , a )
lowercase__ : Any = self._prepare_for_class(a , a , return_labels=a )
lowercase__ : List[Any] = self._prepare_for_class(a , a , return_labels=a )
check_equivalence(a , a , a )
lowercase__ : Any = self._prepare_for_class(a , a )
lowercase__ : int = self._prepare_for_class(a , a )
check_equivalence(a , a , a , {'output_hidden_states': True} )
lowercase__ : Dict = self._prepare_for_class(a , a , return_labels=a )
lowercase__ : Optional[int] = self._prepare_for_class(a , a , return_labels=a )
check_equivalence(a , a , a , {'output_hidden_states': True} )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase , _a):
lowerCamelCase__ : Dict = (MaskFormerSwinBackbone,) if is_torch_available() else ()
lowerCamelCase__ : Optional[int] = MaskFormerSwinConfig
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Optional[int] = MaskFormerSwinModelTester(self )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : int = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
lowercase__ : Optional[Any] = backbone_class(a )
backbone.to(a )
backbone.eval()
lowercase__ : Union[str, Any] = backbone(**a )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , a )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
lowercase__ : List[str] = backbone(**a , output_hidden_states=a )
self.assertIsNotNone(outputs.hidden_states )
self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
lowercase__ , lowercase__ , lowercase__ : int = hidden_state.shape
self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
lowercase__ : List[Any] = backbone(**a , output_attentions=a )
self.assertIsNotNone(outputs.attentions )
| 645
| 1
|
"""simple docstring"""
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
_UpperCamelCase : Tuple = trt.Logger(trt.Logger.WARNING)
_UpperCamelCase : Optional[Any] = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
_UpperCamelCase : Optional[int] = logging.getLogger(__name__)
_UpperCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--onnx_model_path",
default=None,
type=str,
required=True,
help="Path to ONNX model: ",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints and predictions will be written.",
)
# Other parameters
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
required=True,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--version_2_with_negative",
action="store_true",
help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
"--null_score_diff_threshold",
type=float,
default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
"--max_seq_length",
default=3_84,
type=int,
help=(
"The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded."
),
)
parser.add_argument(
"--doc_stride",
default=1_28,
type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
parser.add_argument(
"--n_best_size",
default=20,
type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
"--max_answer_length",
default=30,
type=int,
help=(
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
),
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
required=True,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
"--int8",
action="store_true",
help="Whether to use INT8",
)
_UpperCamelCase : List[str] = parser.parse_args()
if args.tokenizer_name:
_UpperCamelCase : int = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
logger.info("Training/evaluation parameters %s", args)
_UpperCamelCase : str = args.per_device_eval_batch_size
_UpperCamelCase : Tuple = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
_UpperCamelCase : Union[str, Any] = True
_UpperCamelCase : str = "temp_engine/bert-fp32.engine"
if args.fpaa:
_UpperCamelCase : Optional[Any] = "temp_engine/bert-fp16.engine"
if args.inta:
_UpperCamelCase : List[str] = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists("temp_engine"):
os.makedirs("temp_engine")
_UpperCamelCase : Optional[int] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
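# Note: TensorRT network-creation flags are passed as a bitmask, so the line above
# computes 1 << EXPLICIT_BATCH to request an explicit-batch network, which the ONNX
# parser requires.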
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, "rb") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
_UpperCamelCase : List[Any] = [network.get_input(i) for i in range(network.num_inputs)]
_UpperCamelCase : List[Any] = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
_UpperCamelCase : Dict = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
_UpperCamelCase : str = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
_UpperCamelCase : Union[str, Any] = builder.build_engine(network, config)
# serialize the engine and store it in a file (it can be loaded and deserialized later):
with open(engine_name, "wb") as f:
f.write(engine.serialize())
def a_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : Any = np.asarray(inputs['input_ids'] , dtype=np.intaa )
lowercase__ : List[str] = np.asarray(inputs['attention_mask'] , dtype=np.intaa )
lowercase__ : Dict = np.asarray(inputs['token_type_ids'] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , _lowerCAmelCase )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , _lowerCAmelCase )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , _lowerCAmelCase )
# start time
lowercase__ : List[Any] = time.time()
# Run inference
context.execute_async(
bindings=[int(_lowerCAmelCase ) for d_inp in d_inputs] + [int(_lowerCAmelCase ), int(_lowerCAmelCase )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
cuda.memcpy_dtoh_async(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Synchronize the stream and take time
stream.synchronize()
# end time
lowercase__ : str = time.time()
lowercase__ : Any = end_time - start_time
lowercase__ : str = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
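# Note on the pattern above: the host-to-device copies, the kernel launch and the
# device-to-host copies are all enqueued asynchronously on a single CUDA stream,
# so stream.synchronize() is what actually bounds the timed region. The h_output*
# buffers must be page-locked (they are allocated with cuda.pagelocked_empty
# further below) for the asynchronous copies to be valid.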
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
_UpperCamelCase : Dict = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_UpperCamelCase : Optional[Any] = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
_UpperCamelCase : Optional[Any] = raw_datasets["validation"].column_names
_UpperCamelCase : str = "question" if "question" in column_names else column_names[0]
_UpperCamelCase : Any = "context" if "context" in column_names else column_names[1]
_UpperCamelCase : Any = "answers" if "answers" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
_UpperCamelCase : Optional[int] = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
_UpperCamelCase : Dict = min(args.max_seq_length, tokenizer.model_max_length)
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : List[Any] = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit with the context of the previous feature.
lowercase__ : str = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='only_second' if pad_on_right else 'only_first' , max_length=_lowerCAmelCase , stride=args.doc_stride , return_overflowing_tokens=_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , padding='max_length' , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
lowercase__ : Any = tokenized_examples.pop('overflow_to_sample_mapping' )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
lowercase__ : Union[str, Any] = []
for i in range(len(tokenized_examples['input_ids'] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
lowercase__ : List[str] = tokenized_examples.sequence_ids(_lowerCAmelCase )
lowercase__ : Dict = 1 if pad_on_right else 0
# One example can give several spans; this is the index of the example containing this span of text.
lowercase__ : Optional[int] = sample_mapping[i]
tokenized_examples["example_id"].append(examples['id'][sample_index] )
# Set the offset_mapping entries that are not part of the context to None, so it is easy to determine
# whether a token position is part of the context or not.
lowercase__ : int = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples['offset_mapping'][i] )
]
return tokenized_examples
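# Sketch of what the overflow mapping looks like (values are illustrative, not
# computed from a real dataset): with a long context and doc_stride=128, one
# input example can yield several features, e.g.
#   overflow_to_sample_mapping == [0, 0, 0, 1, 2, 2]
# meaning features 0-2 all come from example 0, feature 3 from example 1, and
# features 4-5 from example 2.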
_UpperCamelCase : int = raw_datasets["validation"]
# Validation Feature Creation
_UpperCamelCase : List[Any] = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
_UpperCamelCase : int = default_data_collator
_UpperCamelCase : Any = eval_dataset.remove_columns(["example_id", "offset_mapping"])
_UpperCamelCase : Any = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def a_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Any , _lowerCAmelCase : List[str]="eval" ):
'''simple docstring'''
lowercase__ : List[Any] = postprocess_qa_predictions(
examples=_lowerCAmelCase , features=_lowerCAmelCase , predictions=_lowerCAmelCase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=_lowerCAmelCase , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
lowercase__ : Optional[int] = [
{'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
]
else:
lowercase__ : Optional[Any] = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]
lowercase__ : Any = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=_lowerCAmelCase , label_ids=_lowerCAmelCase )
_UpperCamelCase : Optional[Any] = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def a_ ( _lowerCAmelCase : Any ):
'''simple docstring'''
return trt.volume(engine.get_binding_shape(_lowerCAmelCase ) ) * engine.get_binding_dtype(_lowerCAmelCase ).itemsize
# Allocate device memory for inputs and outputs.
_UpperCamelCase : Any = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
_UpperCamelCase : List[Any] = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
_UpperCamelCase : int = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
_UpperCamelCase : Optional[Any] = cuda.mem_alloc(h_outputa.nbytes)
_UpperCamelCase : str = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
_UpperCamelCase : int = cuda.Stream()
# Evaluation
logger.info("***** Running Evaluation *****")
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
_UpperCamelCase : int = 0.0
_UpperCamelCase : int = 0
_UpperCamelCase : Dict = timeit.default_timer()
_UpperCamelCase : List[str] = None
for step, batch in enumerate(eval_dataloader):
_UpperCamelCase , _UpperCamelCase : int = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
_UpperCamelCase , _UpperCamelCase : str = outputs
_UpperCamelCase : List[str] = torch.tensor(start_logits)
_UpperCamelCase : List[Any] = torch.tensor(end_logits)
# necessary to pad predictions and labels so they can be gathered across processes
_UpperCamelCase : int = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_00)
_UpperCamelCase : str = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_00)
_UpperCamelCase : List[Any] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
_UpperCamelCase : List[Any] = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_00)
if all_preds is not None:
_UpperCamelCase : List[str] = nested_truncate(all_preds, len(eval_dataset))
_UpperCamelCase : Optional[int] = timeit.default_timer() - start_time
logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info("Average Inference Time = {:.3f} ms".format(total_time * 10_00 / niter))
logger.info("Total Inference Time = {:.3f} ms".format(total_time * 10_00))
logger.info("Total Number of Inference = %d", niter)
_UpperCamelCase : Dict = post_processing_function(eval_examples, eval_dataset, all_preds)
_UpperCamelCase : Dict = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'''Evaluation metrics: {eval_metric}''')
| 645
|
"""simple docstring"""
import math
def a_ ( _lowerCAmelCase : int = 100 ):
'''simple docstring'''
lowercase__ : Union[str, Any] = sum(i * i for i in range(1 , n + 1 ) )
lowercase__ : str = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
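# A closed-form cross-check added for illustration (standard identities, not part
# of the original script): sum(i) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6.
def _closed_form_solution(n: int = 100) -> int:
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares  # 25164150 for n = 100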
if __name__ == "__main__":
print(f'''{solution() = }''')
| 645
| 1
|
"""simple docstring"""
from string import ascii_uppercase
_UpperCamelCase : Dict = {str(ord(c) - 55): c for c in ascii_uppercase}
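# Since ord("A") - 55 == 10, the dict above is {"10": "A", "11": "B", ..., "35": "Z"},
# i.e. it maps the string form of digit values above 9 to their letter symbols.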
def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : int ):
'''simple docstring'''
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise TypeError('int() can\'t convert non-string with explicit base' )
if num < 0:
raise ValueError('parameter must be positive int' )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise TypeError('\'str\' object cannot be interpreted as an integer' )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise TypeError('\'float\' object cannot be interpreted as an integer' )
if base in (0, 1):
raise ValueError('base must be >= 2' )
if base > 36:
raise ValueError('base must be <= 36' )
lowercase__ : str = ''
lowercase__ : List[Any] = 0
lowercase__ : Optional[int] = 0
while div != 1:
lowercase__ , lowercase__ : List[str] = divmod(_lowerCAmelCase , _lowerCAmelCase )
if base >= 11 and 9 < mod < 36:
lowercase__ : List[str] = ALPHABET_VALUES[str(_lowerCAmelCase )]
else:
lowercase__ : List[Any] = str(_lowerCAmelCase )
new_value += actual_value
lowercase__ : Optional[int] = num // base
lowercase__ : List[Any] = div
if div == 0:
return str(new_value[::-1] )
elif div == 1:
new_value += str(_lowerCAmelCase )
return str(new_value[::-1] )
return new_value[::-1]
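# A couple of concrete values for reference (consistent with the exhaustive
# round-trip check below):
#   decimal_to_any(255, 16) -> "FF"
#   decimal_to_any(9, 2)    -> "1001"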
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(10_00):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 645
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ , lowercase__ : str = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-canny' , from_pt=a , dtype=jnp.bfloataa )
lowercase__ , lowercase__ : List[str] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
lowercase__ : List[Any] = controlnet_params
lowercase__ : int = 'bird'
lowercase__ : List[Any] = jax.device_count()
lowercase__ : Dict = pipe.prepare_text_inputs([prompts] * num_samples )
lowercase__ : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
lowercase__ : Optional[int] = pipe.prepare_image_inputs([canny_image] * num_samples )
lowercase__ : List[Any] = jax.random.PRNGKey(0 )
lowercase__ : Tuple = jax.random.split(a , jax.device_count() )
lowercase__ : str = replicate(a )
lowercase__ : List[str] = shard(a )
lowercase__ : Dict = shard(a )
lowercase__ : List[Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=5_0 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
lowercase__ : Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase__ : Tuple = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowercase__ : int = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ : Optional[Any] = jnp.array(
[0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ , lowercase__ : int = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-openpose' , from_pt=a , dtype=jnp.bfloataa )
lowercase__ , lowercase__ : Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
lowercase__ : Optional[Any] = controlnet_params
lowercase__ : List[Any] = 'Chef in the kitchen'
lowercase__ : List[str] = jax.device_count()
lowercase__ : Dict = pipe.prepare_text_inputs([prompts] * num_samples )
lowercase__ : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
lowercase__ : Optional[int] = pipe.prepare_image_inputs([pose_image] * num_samples )
lowercase__ : List[str] = jax.random.PRNGKey(0 )
lowercase__ : str = jax.random.split(a , jax.device_count() )
lowercase__ : Optional[Any] = replicate(a )
lowercase__ : Optional[Any] = shard(a )
lowercase__ : List[Any] = shard(a )
lowercase__ : List[Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=5_0 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
lowercase__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase__ : List[str] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowercase__ : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ : str = jnp.array(
[[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 645
| 1
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
_UpperCamelCase : List[str] = [
"openmmlab/upernet-convnext-tiny",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_UpperCamelCase : List[Any] = "UperNetConfig"
class UpperCAmelCase_ ( nn.Module):
def __init__( self , a , a , a , a = 0 , a = False , a = 1 , ) -> None:
super().__init__()
lowercase__ : List[str] = nn.Convad(
in_channels=a , out_channels=a , kernel_size=a , padding=a , bias=a , dilation=a , )
lowercase__ : int = nn.BatchNormad(a )
lowercase__ : Union[str, Any] = nn.ReLU()
def _UpperCAmelCase ( self , a ) -> torch.Tensor:
lowercase__ : Union[str, Any] = self.conv(a )
lowercase__ : Tuple = self.batch_norm(a )
lowercase__ : str = self.activation(a )
return output
class UpperCAmelCase_ ( nn.Module):
def __init__( self , a , a , a ) -> None:
super().__init__()
lowercase__ : Optional[int] = [
nn.AdaptiveAvgPoolad(a ),
UperNetConvModule(a , a , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(a ) , a )
def _UpperCAmelCase ( self , a ) -> torch.Tensor:
lowercase__ : Optional[Any] = input
for layer in self.layers:
lowercase__ : Union[str, Any] = layer(a )
return hidden_state
class UpperCAmelCase_ ( nn.Module):
def __init__( self , a , a , a , a ) -> None:
super().__init__()
lowercase__ : int = pool_scales
lowercase__ : Optional[int] = align_corners
lowercase__ : List[str] = in_channels
lowercase__ : Optional[int] = channels
lowercase__ : Dict = []
for i, pool_scale in enumerate(a ):
lowercase__ : Optional[Any] = UperNetPyramidPoolingBlock(pool_scale=a , in_channels=a , channels=a )
self.blocks.append(a )
self.add_module(str(a ) , a )
def _UpperCAmelCase ( self , a ) -> List[torch.Tensor]:
lowercase__ : Union[str, Any] = []
for ppm in self.blocks:
lowercase__ : str = ppm(a )
lowercase__ : List[Any] = nn.functional.interpolate(
a , size=x.size()[2:] , mode='bilinear' , align_corners=self.align_corners )
ppm_outs.append(a )
return ppm_outs
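# The pyramid pooling module above pools the deepest feature map at each scale in
# config.pool_scales (e.g. 1, 2, 3, 6), applies a 1x1 conv to every pooled map,
# and bilinearly upsamples each result back to the input resolution so that
# psp_forward below can concatenate them channel-wise with the original features.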
class UpperCAmelCase_ ( nn.Module):
def __init__( self , a , a ) -> Dict:
super().__init__()
lowercase__ : List[str] = config
lowercase__ : List[Any] = config.pool_scales # e.g. (1, 2, 3, 6)
lowercase__ : Tuple = in_channels
lowercase__ : Union[str, Any] = config.hidden_size
lowercase__ : Union[str, Any] = False
lowercase__ : Optional[Any] = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
lowercase__ : Optional[Any] = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
lowercase__ : List[str] = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
lowercase__ : str = nn.ModuleList()
lowercase__ : Union[str, Any] = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
lowercase__ : Optional[int] = UperNetConvModule(a , self.channels , kernel_size=1 )
lowercase__ : Any = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(a )
self.fpn_convs.append(a )
lowercase__ : Any = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def _UpperCAmelCase ( self ) -> List[Any]:
self.apply(self._init_weights )
def _UpperCAmelCase ( self , a ) -> str:
if isinstance(a , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def _UpperCAmelCase ( self , a ) -> Dict:
lowercase__ : Optional[int] = inputs[-1]
lowercase__ : int = [x]
psp_outs.extend(self.psp_modules(a ) )
lowercase__ : Tuple = torch.cat(a , dim=1 )
lowercase__ : Tuple = self.bottleneck(a )
return output
def _UpperCAmelCase ( self , a ) -> torch.Tensor:
# build laterals
lowercase__ : str = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(a ) )
# build top-down path
lowercase__ : str = len(a )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
lowercase__ : str = laterals[i - 1].shape[2:]
lowercase__ : Union[str, Any] = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=a , mode='bilinear' , align_corners=self.align_corners )
# build outputs
lowercase__ : Union[str, Any] = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
lowercase__ : Union[str, Any] = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='bilinear' , align_corners=self.align_corners )
lowercase__ : Dict = torch.cat(a , dim=1 )
lowercase__ : Dict = self.fpn_bottleneck(a )
lowercase__ : int = self.classifier(a )
return output
class UpperCAmelCase_ ( nn.Module):
def __init__( self , a , a = 2 , a = 3 , a = 1 ) -> None:
super().__init__()
lowercase__ : int = config
lowercase__ : Dict = config.auxiliary_in_channels
lowercase__ : str = config.auxiliary_channels
lowercase__ : str = config.auxiliary_num_convs
lowercase__ : str = config.auxiliary_concat_input
lowercase__ : Optional[Any] = in_index
lowercase__ : Tuple = (kernel_size // 2) * dilation
lowercase__ : List[Any] = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=a , padding=a , dilation=a ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=a , padding=a , dilation=a ) )
if self.num_convs == 0:
lowercase__ : int = nn.Identity()
else:
lowercase__ : Union[str, Any] = nn.Sequential(*a )
if self.concat_input:
lowercase__ : List[str] = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=a , padding=kernel_size // 2 )
lowercase__ : List[Any] = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
def _UpperCAmelCase ( self ) -> Optional[Any]:
self.apply(self._init_weights )
def _UpperCAmelCase ( self , a ) -> str:
if isinstance(a , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def _UpperCAmelCase ( self , a ) -> torch.Tensor:
# just take the relevant feature maps
lowercase__ : List[Any] = encoder_hidden_states[self.in_index]
lowercase__ : Tuple = self.convs(a )
if self.concat_input:
lowercase__ : Optional[int] = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
lowercase__ : int = self.classifier(a )
return output
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : int = UperNetConfig
lowerCamelCase__ : Optional[Any] = "pixel_values"
lowerCamelCase__ : List[Any] = True
def _UpperCAmelCase ( self , a ) -> Optional[Any]:
if isinstance(a , a ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def _UpperCAmelCase ( self ) -> Optional[int]:
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def _UpperCAmelCase ( self , a , a=False ) -> List[str]:
if isinstance(a , a ):
lowercase__ : Tuple = value
_UpperCamelCase : int = r"\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_UpperCamelCase : str = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes." , _a , )
class UpperCAmelCase_ ( _a):
def __init__( self , a ) -> Tuple:
super().__init__(a )
lowercase__ : Optional[int] = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
lowercase__ : Optional[int] = UperNetHead(a , in_channels=self.backbone.channels )
lowercase__ : Optional[int] = UperNetFCNHead(a ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('batch_size, sequence_length' ) )
@replace_return_docstrings(output_type=a , config_class=_CONFIG_FOR_DOC )
def _UpperCAmelCase ( self , a = None , a = None , a = None , a = None , a = None , ) -> Union[tuple, SemanticSegmenterOutput]:
lowercase__ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ : Union[str, Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ : List[Any] = output_attentions if output_attentions is not None else self.config.output_attentions
lowercase__ : str = self.backbone.forward_with_filtered_kwargs(
a , output_hidden_states=a , output_attentions=a )
lowercase__ : List[str] = outputs.feature_maps
lowercase__ : List[str] = self.decode_head(a )
lowercase__ : Union[str, Any] = nn.functional.interpolate(a , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=a )
lowercase__ : List[str] = None
if self.auxiliary_head is not None:
lowercase__ : Dict = self.auxiliary_head(a )
lowercase__ : int = nn.functional.interpolate(
a , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=a )
lowercase__ : List[str] = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError('The number of labels should be greater than one' )
else:
# compute weighted loss
lowercase__ : List[Any] = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
lowercase__ : Union[str, Any] = loss_fct(a , a )
lowercase__ : Dict = loss_fct(a , a )
lowercase__ : Dict = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
lowercase__ : Optional[int] = (logits,) + outputs[1:]
else:
lowercase__ : Optional[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=a , logits=a , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
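# Training note: when labels are provided, the total loss computed above is
#   loss = main_loss + config.auxiliary_loss_weight * auxiliary_loss
# so the FCN auxiliary head only contributes a weighted side objective; at
# inference time only the logits from the main UperNet head are returned.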
| 645
|
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 645
| 1
|
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=3_2 , a=2 , a=3 , a=1_6 , a=[3_2, 6_4, 1_2_8] , a=[1, 2, 1] , a=[2, 2, 4] , a=2 , a=2.0 , a=True , a=0.0 , a=0.0 , a=0.1 , a="gelu" , a=False , a=True , a=0.02 , a=1e-5 , a=True , a=None , a=True , a=1_0 , a=8 , a=["stage1", "stage2"] , a=[1, 2] , ) -> List[str]:
lowercase__ : str = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : List[Any] = image_size
lowercase__ : Tuple = patch_size
lowercase__ : Optional[int] = num_channels
lowercase__ : List[Any] = embed_dim
lowercase__ : Dict = hidden_sizes
lowercase__ : List[str] = depths
lowercase__ : Any = num_heads
lowercase__ : Tuple = window_size
lowercase__ : int = mlp_ratio
lowercase__ : Union[str, Any] = qkv_bias
lowercase__ : str = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Tuple = drop_path_rate
lowercase__ : List[str] = hidden_act
lowercase__ : Union[str, Any] = use_absolute_embeddings
lowercase__ : str = patch_norm
lowercase__ : Tuple = layer_norm_eps
lowercase__ : Optional[Any] = initializer_range
lowercase__ : List[Any] = is_training
lowercase__ : Any = scope
lowercase__ : List[str] = use_labels
lowercase__ : List[str] = type_sequence_label_size
lowercase__ : List[Any] = encoder_stride
lowercase__ : Tuple = out_features
lowercase__ : int = out_indices
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Dict = None
if self.use_labels:
lowercase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : str = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Any:
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _UpperCAmelCase ( self , a , a , a ) -> Any:
lowercase__ : Union[str, Any] = FocalNetModel(config=a )
model.to(a )
model.eval()
lowercase__ : Dict = model(a )
lowercase__ : Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase__ : str = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _UpperCAmelCase ( self , a , a , a ) -> Tuple:
lowercase__ : Tuple = FocalNetBackbone(config=a )
model.to(a )
model.eval()
lowercase__ : Optional[Any] = model(a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowercase__ : str = None
lowercase__ : List[str] = FocalNetBackbone(config=a )
model.to(a )
model.eval()
lowercase__ : Optional[int] = model(a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]:
lowercase__ : Dict = FocalNetForMaskedImageModeling(config=a )
model.to(a )
model.eval()
lowercase__ : Optional[int] = model(a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase__ : int = 1
lowercase__ : Tuple = FocalNetForMaskedImageModeling(a )
model.to(a )
model.eval()
lowercase__ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ : Any = model(a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]:
lowercase__ : Optional[Any] = self.type_sequence_label_size
lowercase__ : List[str] = FocalNetForImageClassification(a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase__ : Optional[int] = 1
lowercase__ : Optional[Any] = FocalNetForImageClassification(a )
model.to(a )
model.eval()
lowercase__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ : Dict = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Dict = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Optional[int] = config_and_inputs
lowercase__ : int = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : List[Any] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : Optional[Any] = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : int = False
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : List[Any] = FocalNetModelTester(self )
lowercase__ : List[str] = ConfigTester(self , config_class=a , embed_dim=3_7 , has_text_modality=a )
def _UpperCAmelCase ( self ) -> List[str]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCAmelCase ( self ) -> Optional[Any]:
return
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> str:
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowercase__ : Optional[Any] = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowercase__ : Dict = model_class(a )
lowercase__ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Dict = [*signature.parameters.keys()]
lowercase__ : Optional[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _UpperCAmelCase ( self , a , a , a , a ) -> Union[str, Any]:
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : List[str] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Tuple = outputs.hidden_states
lowercase__ : Tuple = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(a ) , a )
# FocalNet has a different seq_length
lowercase__ : List[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowercase__ : int = outputs.reshaped_hidden_states
self.assertEqual(len(a ) , a )
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = reshaped_hidden_states[0].shape
lowercase__ : str = (
reshaped_hidden_states[0].view(a , a , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowercase__ : Tuple = True
self.check_hidden_states_output(a , a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : str = True
self.check_hidden_states_output(a , a , a , a )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : str = 3
lowercase__ : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase__ : str = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase__ : int = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowercase__ : List[Any] = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : str = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[str] = FocalNetModel.from_pretrained(a )
self.assertIsNotNone(a )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Any = _config_zero_init(a )
for model_class in self.all_model_classes:
lowercase__ : Optional[Any] = model_class(config=a )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def _UpperCAmelCase ( self ) -> Tuple:
# TODO update organization
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Tuple = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(a )
lowercase__ : str = self.default_image_processor
lowercase__ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowercase__ : int = image_processor(images=a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : Optional[int] = model(**a )
# verify the logits
lowercase__ : List[str] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ : List[Any] = torch.tensor([0.2_166, -0.4_368, 0.2_191] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 2_8_1 )
@require_torch
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Union[str, Any] = (FocalNetBackbone,) if is_torch_available() else ()
lowerCamelCase__ : Any = FocalNetConfig
lowerCamelCase__ : Optional[int] = False
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : str = FocalNetModelTester(self )
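# A small standalone helper (an illustrative sketch, not part of the test
# suite) reproducing the shape arithmetic checked in create_and_check_model
# above: hierarchical backbones merge 2x2 patches per stage, so the sequence
# shrinks 4x and the channel count doubles at every stage transition.
def expected_final_shape(image_size, patch_size, embed_dim, num_stages):
    num_patches = (image_size // patch_size) ** 2
    seq_len = num_patches // (4 ** (num_stages - 1))  # 2x2 patch merging per stage
    hidden_dim = embed_dim * 2 ** (num_stages - 1)    # channels double per stage
    return seq_len, hidden_dim

# With the tester defaults above (image_size=32, patch_size=2, embed_dim=16,
# three stages from depths=[2, 2, 4]):
print(expected_final_shape(32, 2, 16, 3))  # (16, 64)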
| 645
|
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
lowercase__ : Union[str, Any] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
model.to(a )
from datasets import load_dataset
lowercase__ : str = load_dataset('nielsr/rvlcdip-demo' )
lowercase__ : Tuple = dataset['train'][0]['image'].convert('RGB' )
lowercase__ : int = image_processor(a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : List[str] = model(**a )
lowercase__ : List[Any] = outputs.logits
lowercase__ : Union[str, Any] = torch.Size((1, 1_6) )
self.assertEqual(logits.shape , a )
lowercase__ : Tuple = torch.tensor(
[-0.4_158, -0.4_092, -0.4_347] , device=a , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , a , atol=1e-4 ) )
| 645
| 1
|
"""simple docstring"""
import os
def largest_product(grid):
    '''Returns the greatest product of four adjacent numbers in the grid, in
    any direction (up, down, left, right, or diagonally).'''
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            max_product = max(
                vert_product, horz_product, lr_diag_product, rl_diag_product )
            if max_product > largest:
                largest = max_product
    return largest
def solution():
    '''Reads the grid from grid.txt next to this file and returns the greatest
    product of four adjacent numbers in any direction.'''
    grid = []
    with open(os.path.dirname(__file__) + '/grid.txt' ) as file:
        for line in file:
            grid.append(line.strip('\n' ).split(' ' ) )
    grid = [[int(i) for i in grid[j]] for j in range(len(grid) )]
    return largest_product(grid)
if __name__ == "__main__":
print(solution())
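    # Sanity check on a small 4x4 grid (illustrative values, not the Euler
    # grid): the best run of four adjacent numbers is the bottom row.
    sample = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
    assert largest_product(sample) == 13 * 14 * 15 * 16  # 43680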
| 645
|
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase_ :
@staticmethod
def _UpperCAmelCase ( *a , **a ) -> int:
pass
def a_ ( _lowerCAmelCase : Image ):
'''simple docstring'''
m = hashlib.md5(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ : Union[str, Any] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def _UpperCAmelCase ( self , a , a , a ) -> Dict:
lowercase__ : Union[str, Any] = DepthEstimationPipeline(model=a , image_processor=a )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _UpperCAmelCase ( self , a , a ) -> Optional[int]:
lowercase__ : Tuple = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , a )
import datasets
lowercase__ : Tuple = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
lowercase__ : List[Any] = depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
] , a , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@slow
@require_torch
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Tuple = 'Intel/dpt-large'
lowercase__ : Optional[int] = pipeline('depth-estimation' , model=a )
lowercase__ : List[Any] = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
lowercase__ : Optional[Any] = hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )
@require_torch
def _UpperCAmelCase ( self ) -> Optional[int]:
# This is highly irregular to have no small tests.
self.skipTest('There is no hf-internal-testing tiny model for either GLPN or DPT' )
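# Minimal usage sketch of the depth-estimation pipeline exercised above (runs
# outside the test suite; assumes network access to fetch the Intel/dpt-large
# checkpoint and the example image).
from transformers import pipeline

depth_estimator = pipeline('depth-estimation' , model='Intel/dpt-large' )
result = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
result["depth"].save("depth.png")       # PIL image of the rendered depth map
print(result["predicted_depth"].shape)  # raw torch.Tensor of predicted depths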
| 645
| 1
|
"""simple docstring"""
def prefix_function(input_string: str) -> list:
    '''Knuth-Morris-Pratt prefix function: result[i] is the length of the
    longest proper prefix of input_string[: i + 1] that is also its suffix.'''
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def longest_prefix(input_string: str) -> int:
    '''Length of the longest prefix that also reoccurs as a suffix ending at
    some later position in the string.'''
    return max(prefix_function(input_string))
if __name__ == "__main__":
import doctest
doctest.testmod()
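    # Worked example: for "aabaaab" the prefix function is [0, 1, 0, 1, 2, 2, 3];
    # at the last position the prefix "aab" also appears as the suffix "aab".
    assert prefix_function("aabaaab") == [0, 1, 0, 1, 2, 2, 3]
    assert longest_prefix("aabaaab") == 3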
| 645
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class UpperCAmelCase_ ( _a):
def __init__( self ) -> Any:
lowercase__ : Tuple = []
def _UpperCAmelCase ( self , a , a , a , **a ) -> Any:
self.events.append('on_init_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Optional[int]:
self.events.append('on_train_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> List[str]:
self.events.append('on_train_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_epoch_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Optional[Any]:
self.events.append('on_epoch_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_step_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> str:
self.events.append('on_step_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_evaluate' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Tuple:
self.events.append('on_predict' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Union[str, Any]:
self.events.append('on_save' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> List[str]:
self.events.append('on_log' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Any:
self.events.append('on_prediction_step' )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> str:
lowercase__ : str = tempfile.mkdtemp()
def _UpperCAmelCase ( self ) -> Dict:
shutil.rmtree(self.output_dir )
def _UpperCAmelCase ( self , a=0 , a=0 , a=6_4 , a=6_4 , a=None , a=False , **a ) -> int:
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# it's set to False since the tests later on depend on its value.
lowercase__ : str = RegressionDataset(length=a )
lowercase__ : Any = RegressionDataset(length=a )
lowercase__ : Optional[Any] = RegressionModelConfig(a=a , b=a )
lowercase__ : Union[str, Any] = RegressionPreTrainedModel(a )
lowercase__ : Tuple = TrainingArguments(self.output_dir , disable_tqdm=a , report_to=[] , **a )
return Trainer(
a , a , train_dataset=a , eval_dataset=a , callbacks=a , )
def _UpperCAmelCase ( self , a , a ) -> Union[str, Any]:
self.assertEqual(len(a ) , len(a ) )
# Order doesn't matter
lowercase__ : Optional[int] = sorted(a , key=lambda a : cb.__name__ if isinstance(a , a ) else cb.__class__.__name__ )
lowercase__ : Tuple = sorted(a , key=lambda a : cb.__name__ if isinstance(a , a ) else cb.__class__.__name__ )
for cba, cba in zip(a , a ):
if isinstance(a , a ) and isinstance(a , a ):
self.assertEqual(a , a )
elif isinstance(a , a ) and not isinstance(a , a ):
self.assertEqual(a , cba.__class__ )
elif not isinstance(a , a ) and isinstance(a , a ):
self.assertEqual(cba.__class__ , a )
else:
self.assertEqual(a , a )
def _UpperCAmelCase ( self , a ) -> Optional[Any]:
lowercase__ : Dict = ['on_init_end', 'on_train_begin']
lowercase__ : List[Any] = 0
lowercase__ : Optional[int] = len(trainer.get_eval_dataloader() )
lowercase__ : Tuple = ['on_prediction_step'] * len(trainer.get_eval_dataloader() ) + ['on_log', 'on_evaluate']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('on_epoch_begin' )
for _ in range(a ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('on_log' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('on_save' )
expected_events.append('on_epoch_end' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : int = self.get_trainer()
lowercase__ : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# Callbacks passed at init are added to the default callbacks
lowercase__ : str = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
lowercase__ : List[Any] = self.get_trainer(disable_tqdm=a )
lowercase__ : Optional[Any] = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : int = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
lowercase__ : List[str] = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(a )
expected_callbacks.remove(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
lowercase__ : Optional[Any] = self.get_trainer()
lowercase__ : List[Any] = trainer.pop_callback(a )
self.assertEqual(cb.__class__ , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
trainer.add_callback(a )
expected_callbacks.insert(0 , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# We can also add, pop, or remove by instance
lowercase__ : int = self.get_trainer()
lowercase__ : List[str] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(a )
expected_callbacks.remove(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
lowercase__ : Tuple = self.get_trainer()
lowercase__ : Dict = trainer.callback_handler.callbacks[0]
lowercase__ : Union[str, Any] = trainer.pop_callback(a )
self.assertEqual(a , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
trainer.add_callback(a )
expected_callbacks.insert(0 , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
def _UpperCAmelCase ( self ) -> Tuple:
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action='ignore' , category=a )
lowercase__ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
lowercase__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# Independent log/save/eval
lowercase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
lowercase__ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
lowercase__ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='steps' )
trainer.train()
lowercase__ : Optional[int] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='epoch' )
trainer.train()
lowercase__ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# A bit of everything
lowercase__ : Any = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=1_0 , eval_steps=5 , evaluation_strategy='steps' , )
trainer.train()
lowercase__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# warning should be emitted for duplicated callbacks
with patch('transformers.trainer_callback.logger.warning' ) as warn_mock:
lowercase__ : str = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(a ) in warn_mock.call_args[0][0]
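# Minimal sketch of wiring a custom callback into a real Trainer (illustrative;
# the model/args/dataset placeholders in the commented lines are assumptions,
# not objects from the test above).
from transformers import TrainerCallback

class LossLoggerCallback(TrainerCallback):
    def on_log(self, args, state, control, logs=None, **kwargs):
        if logs and "loss" in logs:
            print(f"step {state.global_step}: loss={logs['loss']:.4f}")

# trainer = Trainer(model, training_args, train_dataset=train_ds,
#                   callbacks=[LossLoggerCallback])  # a class or an instance both work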
| 645
| 1
|
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def a_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : List[str]=None ):
'''simple docstring'''
lowercase__ : Dict = None
if token is not None:
lowercase__ : List[str] = {'Accept': 'application/vnd.github+json', 'Authorization': f"""Bearer {token}"""}
lowercase__ : Dict = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
lowercase__ : List[Any] = requests.get(_lowerCAmelCase , headers=_lowerCAmelCase ).json()
lowercase__ : Optional[Any] = {}
try:
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
lowercase__ : Optional[Any] = math.ceil((result['total_count'] - 100) / 100 )
for i in range(_lowerCAmelCase ):
lowercase__ : Optional[Any] = requests.get(url + f"""&page={i + 2}""" , headers=_lowerCAmelCase ).json()
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
return job_links
except Exception:
print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple=None ):
'''simple docstring'''
lowercase__ : int = None
if token is not None:
lowercase__ : List[str] = {'Accept': 'application/vnd.github+json', 'Authorization': f"""Bearer {token}"""}
lowercase__ : Union[str, Any] = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"""
lowercase__ : Any = requests.get(_lowerCAmelCase , headers=_lowerCAmelCase ).json()
lowercase__ : str = {}
try:
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
lowercase__ : Optional[Any] = math.ceil((result['total_count'] - 100) / 100 )
for i in range(_lowerCAmelCase ):
lowercase__ : int = requests.get(url + f"""&page={i + 2}""" , headers=_lowerCAmelCase ).json()
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
return artifacts
except Exception:
print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def a_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] ):
'''simple docstring'''
lowercase__ : Dict = None
if token is not None:
lowercase__ : Optional[Any] = {'Accept': 'application/vnd.github+json', 'Authorization': f"""Bearer {token}"""}
lowercase__ : List[Any] = requests.get(_lowerCAmelCase , headers=_lowerCAmelCase , allow_redirects=_lowerCAmelCase )
lowercase__ : Tuple = result.headers['Location']
lowercase__ : str = requests.get(_lowerCAmelCase , allow_redirects=_lowerCAmelCase )
lowercase__ : Union[str, Any] = os.path.join(_lowerCAmelCase , f"""{artifact_name}.zip""" )
with open(_lowerCAmelCase , 'wb' ) as fp:
fp.write(response.content )
def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int]=None ):
'''simple docstring'''
lowercase__ : Optional[int] = []
lowercase__ : List[Any] = []
lowercase__ : List[str] = None
with zipfile.ZipFile(_lowerCAmelCase ) as z:
for filename in z.namelist():
if not os.path.isdir(_lowerCAmelCase ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(_lowerCAmelCase ) as f:
for line in f:
lowercase__ : Optional[Any] = line.decode('UTF-8' ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
lowercase__ : Union[str, Any] = line[: line.index(': ' )]
lowercase__ : List[Any] = line[line.index(': ' ) + len(': ' ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith('FAILED ' ):
# `test` is the test method that failed
lowercase__ : str = line[len('FAILED ' ) :]
failed_tests.append(_lowerCAmelCase )
elif filename == "job_name.txt":
lowercase__ : str = line
if len(_lowerCAmelCase ) != len(_lowerCAmelCase ):
raise ValueError(
f"""`errors` and `failed_tests` should have the same number of elements. Got {len(_lowerCAmelCase )} for `errors` """
f"""and {len(_lowerCAmelCase )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
' problem.' )
lowercase__ : List[str] = None
if job_name and job_links:
lowercase__ : Any = job_links.get(_lowerCAmelCase , _lowerCAmelCase )
# A list with elements of the form (line of error, error, failed test)
lowercase__ : str = [x + [y] + [job_link] for x, y in zip(_lowerCAmelCase , _lowerCAmelCase )]
return result
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : str=None ):
'''simple docstring'''
lowercase__ : Any = []
lowercase__ : Optional[Any] = [os.path.join(_lowerCAmelCase , _lowerCAmelCase ) for p in os.listdir(_lowerCAmelCase ) if p.endswith('.zip' )]
for p in paths:
errors.extend(get_errors_from_single_artifact(_lowerCAmelCase , job_links=_lowerCAmelCase ) )
return errors
def a_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple=None ):
'''simple docstring'''
lowercase__ : List[Any] = Counter()
counter.update([x[1] for x in logs] )
lowercase__ : List[str] = counter.most_common()
lowercase__ : Tuple = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
lowercase__ : Union[str, Any] = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]}
lowercase__ : Optional[int] = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def a_ ( _lowerCAmelCase : List[Any] ):
'''simple docstring'''
lowercase__ : int = test.split('::' )[0]
if test.startswith('tests/models/' ):
lowercase__ : Optional[int] = test.split('/' )[2]
else:
lowercase__ : Tuple = None
return test
def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple=None ):
'''simple docstring'''
lowercase__ : List[str] = [(x[0], x[1], get_model(x[2] )) for x in logs]
lowercase__ : Optional[int] = [x for x in logs if x[2] is not None]
lowercase__ : int = {x[2] for x in logs}
lowercase__ : Any = {}
for test in tests:
lowercase__ : Optional[int] = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
lowercase__ : Optional[Any] = counter.most_common()
lowercase__ : List[str] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
lowercase__ : Optional[Any] = sum(error_counts.values() )
if n_errors > 0:
lowercase__ : Optional[int] = {'count': n_errors, 'errors': error_counts}
lowercase__ : List[str] = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def a_ ( _lowerCAmelCase : List[Any] ):
'''simple docstring'''
lowercase__ : Optional[int] = '| no. | error | status |'
lowercase__ : Any = '|-:|:-|:-|'
lowercase__ : Optional[int] = [header, sep]
for error in reduced_by_error:
lowercase__ : List[Any] = reduced_by_error[error]['count']
lowercase__ : Any = f"""| {count} | {error[:100]} | |"""
lines.append(_lowerCAmelCase )
return "\n".join(_lowerCAmelCase )
def a_ ( _lowerCAmelCase : Any ):
'''simple docstring'''
lowercase__ : Optional[Any] = '| model | no. of errors | major error | count |'
lowercase__ : Dict = '|-:|-:|-:|-:|'
lowercase__ : List[Any] = [header, sep]
for model in reduced_by_model:
lowercase__ : Optional[Any] = reduced_by_model[model]['count']
lowercase__ , lowercase__ : List[Any] = list(reduced_by_model[model]['errors'].items() )[0]
lowercase__ : Optional[Any] = f"""| {model} | {count} | {error[:60]} | {_count} |"""
lines.append(_lowerCAmelCase )
return "\n".join(_lowerCAmelCase )
if __name__ == "__main__":
_UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
_UpperCamelCase : str = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_UpperCamelCase : Tuple = get_job_links(args.workflow_run_id, token=args.token)
_UpperCamelCase : str = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
_UpperCamelCase : List[str] = k.find(" / ")
_UpperCamelCase : int = k[index + len(" / ") :]
_UpperCamelCase : Any = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
_UpperCamelCase : Tuple = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
_UpperCamelCase : Union[str, Any] = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
_UpperCamelCase : Any = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
_UpperCamelCase : Union[str, Any] = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
_UpperCamelCase : List[str] = reduce_by_error(errors)
_UpperCamelCase : int = reduce_by_model(errors)
_UpperCamelCase : List[str] = make_github_table(reduced_by_error)
_UpperCamelCase : List[Any] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
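# Standalone sketch of the Counter-based reduction performed above, on fake
# log rows of the form [error_line, error, failed_test, job_link]:
from collections import Counter

sample_logs = [
    ["l1", "AssertionError", "tests/models/bert/test_a.py::t1", None],
    ["l2", "AssertionError", "tests/models/bert/test_a.py::t2", None],
    ["l3", "ImportError", "tests/models/gpt2/test_b.py::t3", None],
]
sample_counter = Counter(row[1] for row in sample_logs)
print(sample_counter.most_common())  # [('AssertionError', 2), ('ImportError', 1)]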
| 645
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_UpperCamelCase : str = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Tuple = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Dict = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
_UpperCamelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
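# The _LazyModule above defers the heavy torch/flax imports until an attribute
# is first accessed; a minimal standalone illustration of the same idea using
# only importlib (this helper class is an assumption, not the transformers API):
import importlib

class LazyAttr:
    def __init__(self, module_name, attr):
        self._module_name, self._attr, self._obj = module_name, attr, None

    def get(self):
        if self._obj is None:  # the import happens on first use only
            self._obj = getattr(importlib.import_module(self._module_name), self._attr)
        return self._obj

lazy_sqrt = LazyAttr("math", "sqrt")
print(lazy_sqrt.get()(9.0))  # 3.0 -- "math" was imported lazily, on demand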
| 645
| 1
|
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
_UpperCamelCase : int = "docs/source/en/_toctree.yml"
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : Optional[Any] = defaultdict(_lowerCAmelCase )
lowercase__ : Tuple = []
lowercase__ : int = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'local': doc['local'], 'title': doc['title']} )
else:
new_doc_list.append(_lowerCAmelCase )
lowercase__ : Dict = new_doc_list
lowercase__ : Tuple = [key for key, value in counts.items() if value > 1]
lowercase__ : Tuple = []
for duplicate_key in duplicates:
lowercase__ : Any = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} )
if len(_lowerCAmelCase ) > 1:
raise ValueError(
f"""{duplicate_key} is present several times in the documentation table of content at """
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] )
lowercase__ : Optional[int] = sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(_lowerCAmelCase ) > 1:
raise ValueError('The doc list has two \'overview\' docs, which is not allowed.' )
overview_doc.extend(_lowerCAmelCase )
# Sort
return overview_doc
def a_ ( _lowerCAmelCase : Optional[Any]=False ):
'''simple docstring'''
with open(_lowerCAmelCase , encoding='utf-8' ) as f:
lowercase__ : int = yaml.safe_load(f.read() )
# Get to the API doc
lowercase__ : List[Any] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
lowercase__ : str = content[api_idx]['sections']
# Then to the scheduler doc
lowercase__ : List[str] = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
lowercase__ : Tuple = api_doc[scheduler_idx]['sections']
lowercase__ : int = clean_doc_toc(_lowerCAmelCase )
lowercase__ : Optional[Any] = False
if new_scheduler_doc != scheduler_doc:
lowercase__ : str = True
if overwrite:
lowercase__ : int = new_scheduler_doc
if diff:
if overwrite:
lowercase__ : Any = api_doc
with open(_lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(_lowerCAmelCase , allow_unicode=_lowerCAmelCase ) )
else:
raise ValueError(
'The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this.' )
def a_ ( _lowerCAmelCase : Tuple=False ):
'''simple docstring'''
with open(_lowerCAmelCase , encoding='utf-8' ) as f:
lowercase__ : int = yaml.safe_load(f.read() )
# Get to the API doc
lowercase__ : Any = 0
while content[api_idx]["title"] != "API":
api_idx += 1
lowercase__ : int = content[api_idx]['sections']
# Then to the pipeline doc
lowercase__ : str = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
lowercase__ : Optional[Any] = False
lowercase__ : Optional[Any] = api_doc[pipeline_idx]['sections']
lowercase__ : int = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
lowercase__ : Any = pipeline_doc['section']
lowercase__ : int = clean_doc_toc(_lowerCAmelCase )
if overwrite:
lowercase__ : List[Any] = new_sub_pipeline_doc
new_pipeline_docs.append(_lowerCAmelCase )
# sort overall pipeline doc
lowercase__ : str = clean_doc_toc(_lowerCAmelCase )
if new_pipeline_docs != pipeline_docs:
lowercase__ : Union[str, Any] = True
if overwrite:
lowercase__ : int = new_pipeline_docs
if diff:
if overwrite:
lowercase__ : Optional[Any] = api_doc
with open(_lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(_lowerCAmelCase , allow_unicode=_lowerCAmelCase ) )
else:
raise ValueError(
'The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
_UpperCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
_UpperCamelCase : Tuple = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
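# Illustrative before/after for the de-duplicating sorter defined first above
# (named clean_doc_toc upstream): duplicate "local" keys collapse to a single
# entry, the rest sort alphabetically by title, and "Overview" stays first.
sample_toc = [
    {"local": "overview", "title": "Overview"},
    {"local": "ddim", "title": "DDIM"},
    {"local": "ddim", "title": "DDIM"},
    {"local": "pndm", "title": "A PNDM page"},
]
# Expected cleaned result:
# [{"local": "overview", "title": "Overview"},
#  {"local": "pndm", "title": "A PNDM page"},
#  {"local": "ddim", "title": "DDIM"}]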
| 645
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self , a ) -> str:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
lowercase__ : str = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(a )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = 'sshleifer/tiny-gpt2'
lowercase__ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , )
lowercase__ : str = TensorFlowBenchmark(a )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : List[str] = 'sgugger/tiny-distilbert-classification'
lowercase__ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , only_pretrain_model=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Optional[int] = 'sshleifer/tiny-gpt2'
lowercase__ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
lowercase__ : List[Any] = AutoConfig.from_pretrained(a )
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , )
lowercase__ : Tuple = TensorFlowBenchmark(a , [config] )
lowercase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : List[str] = AutoConfig.from_pretrained(a )
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : List[str] = TensorFlowBenchmark(a , [config] )
lowercase__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : Optional[int] = AutoConfig.from_pretrained(a )
lowercase__ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : str = TensorFlowBenchmark(a , [config] )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[str] = 'patrickvonplaten/t5-tiny-random'
lowercase__ : Any = AutoConfig.from_pretrained(a )
lowercase__ : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : int = TensorFlowBenchmark(a , configs=[config] )
lowercase__ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a , multi_process=a , )
lowercase__ : Any = TensorFlowBenchmark(a )
lowercase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a , save_to_csv=a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(a , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(a , 'env.csv' ) , multi_process=a , )
lowercase__ : Union[str, Any] = TensorFlowBenchmark(a )
benchmark.run()
self.assertTrue(Path(os.path.join(a , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(a , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(a , 'env.csv' ) ).exists() )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Tuple = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(a ):
self.assertTrue(hasattr(a , 'sequential' ) )
self.assertTrue(hasattr(a , 'cumulative' ) )
self.assertTrue(hasattr(a , 'current' ) )
self.assertTrue(hasattr(a , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a , 'log.txt' ) , log_print=a , trace_memory_line_by_line=a , eager_mode=a , multi_process=a , )
lowercase__ : Optional[int] = TensorFlowBenchmark(a )
lowercase__ : Optional[Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(a , 'log.txt' ) ).exists() )
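# Minimal usage sketch of the benchmark utilities exercised above (downloads
# the tiny checkpoint; multi_process=False keeps the run easy to debug):
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

benchmark_args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    inference=True,
    training=False,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = TensorFlowBenchmark(benchmark_args).run()
print(results.time_inference_result)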
| 645
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=True , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=4 , ) -> Dict:
lowercase__ : Optional[Any] = parent
lowercase__ : Dict = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : int = is_training
lowercase__ : str = use_attention_mask
lowercase__ : Dict = use_token_type_ids
lowercase__ : Optional[int] = use_labels
lowercase__ : Tuple = vocab_size
lowercase__ : List[str] = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : int = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : List[str] = hidden_act
lowercase__ : Dict = hidden_dropout_prob
lowercase__ : Tuple = attention_probs_dropout_prob
lowercase__ : List[str] = max_position_embeddings
lowercase__ : int = type_vocab_size
lowercase__ : List[str] = type_sequence_label_size
lowercase__ : Union[str, Any] = initializer_range
lowercase__ : Optional[int] = num_choices
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = None
if self.use_attention_mask:
lowercase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : List[str] = None
if self.use_token_type_ids:
lowercase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ : Any = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Optional[int] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs
lowercase__ : Union[str, Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Tuple = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Union[str, Any] = FlaxAlbertModelTester(self )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_class_name in self.all_model_classes:
lowercase__ : str = model_class_name.from_pretrained('albert-base-v2' )
lowercase__ : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(a )
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : str = FlaxAlbertModel.from_pretrained('albert-base-v2' )
lowercase__ : Optional[int] = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowercase__ : Optional[Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowercase__ : Any = model(a , attention_mask=a )[0]
lowercase__ : Tuple = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , a )
lowercase__ : Optional[Any] = np.array(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
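# A minimal usage sketch (an addition, not part of the original test file): it
# mirrors the slow integration check above via the public transformers API.
def _example_flax_albert_forward():
    import numpy as np
    from transformers import FlaxAlbertModel
    model = FlaxAlbertModel.from_pretrained('albert-base-v2')
    # (1, 11) dummy token ids -> last_hidden_state of shape (1, 11, 768)
    outputs = model(np.ones((1, 11), dtype='i4'))
    return outputs.last_hidden_state.shape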
| 645
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class UpperCAmelCase_ ( _a):
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=False , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Any:
lowercase__ : Tuple = parent
lowercase__ : List[Any] = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : List[Any] = is_training
lowercase__ : Optional[Any] = use_input_mask
lowercase__ : Optional[int] = use_token_type_ids
lowercase__ : int = use_labels
lowercase__ : Tuple = vocab_size
lowercase__ : int = hidden_size
lowercase__ : Any = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : Optional[Any] = intermediate_size
lowercase__ : Optional[Any] = hidden_act
lowercase__ : List[str] = hidden_dropout_prob
lowercase__ : List[Any] = attention_probs_dropout_prob
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : List[str] = type_vocab_size
lowercase__ : Tuple = type_sequence_label_size
lowercase__ : List[Any] = initializer_range
lowercase__ : str = num_labels
lowercase__ : Tuple = num_choices
lowercase__ : str = scope
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = None
if self.use_input_mask:
lowercase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Dict = None
lowercase__ : Optional[Any] = None
lowercase__ : int = None
if self.use_labels:
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : List[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self ) -> Optional[int]:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict:
lowercase__ : Tuple = DistilBertModel(config=a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , a )
lowercase__ : str = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict:
lowercase__ : Optional[int] = DistilBertForMaskedLM(config=a )
model.to(a )
model.eval()
lowercase__ : Union[str, Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> int:
lowercase__ : Tuple = DistilBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
lowercase__ : Tuple = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> List[str]:
lowercase__ : int = self.num_labels
lowercase__ : Dict = DistilBertForSequenceClassification(a )
model.to(a )
model.eval()
lowercase__ : Optional[Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Any:
lowercase__ : Any = self.num_labels
lowercase__ : List[str] = DistilBertForTokenClassification(config=a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Tuple:
lowercase__ : List[Any] = self.num_choices
lowercase__ : Any = DistilBertForMultipleChoice(config=a )
model.to(a )
model.eval()
lowercase__ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : int = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Union[str, Any] = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__)) : List[str] = config_and_inputs
lowercase__ : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : List[str] = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__ : str = (
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : Any = True
lowerCamelCase__ : List[Any] = True
lowerCamelCase__ : Optional[Any] = True
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : str = DistilBertModelTester(self )
lowercase__ : int = ConfigTester(self , config_class=a , dim=3_7 )
def _UpperCAmelCase ( self ) -> Dict:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = DistilBertModel.from_pretrained(a )
self.assertIsNotNone(a )
@slow
@require_torch_gpu
def _UpperCAmelCase ( self ) -> Any:
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            # DistilBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                continue
lowercase__ : Optional[int] = True
lowercase__ : Union[str, Any] = model_class(config=a )
lowercase__ : int = self._prepare_for_class(a , a )
lowercase__ : Tuple = torch.jit.trace(
a , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , 'traced_model.pt' ) )
lowercase__ : Optional[int] = torch.jit.load(os.path.join(a , 'traced_model.pt' ) , map_location=a )
loaded(inputs_dict['input_ids'].to(a ) , inputs_dict['attention_mask'].to(a ) )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : int = DistilBertModel.from_pretrained('distilbert-base-uncased' )
lowercase__ : Union[str, Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowercase__ : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase__ : Optional[Any] = model(a , attention_mask=a )[0]
lowercase__ : Tuple = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , a )
lowercase__ : List[Any] = torch.tensor(
[[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
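# A minimal usage sketch (an addition, not part of the original test file): the
# same integration check expressed with a tokenizer instead of hand-written ids.
def _example_distilbert_forward():
    import torch
    from transformers import AutoTokenizer, DistilBertModel
    tokenizer = AutoTokenizer.from_pretrained('distilbert-base-uncased')
    model = DistilBertModel.from_pretrained('distilbert-base-uncased')
    inputs = tokenizer('Hello, world!', return_tensors='pt')
    with torch.no_grad():
        output = model(**inputs).last_hidden_state
    return output.shape  # (batch_size, sequence_length, 768)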
| 645
| 1
|
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
_UpperCamelCase : List[Any] = NewType("DataClass", Any)
_UpperCamelCase : Union[str, Any] = NewType("DataClassType", Any)
def a_ ( _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" )
def a_ ( _lowerCAmelCase : list ):
'''simple docstring'''
lowercase__ : Any = {str(_lowerCAmelCase ): choice for choice in choices}
return lambda _lowerCAmelCase : str_to_choice.get(_lowerCAmelCase , _lowerCAmelCase )
def a_ ( *,
_lowerCAmelCase : Union[str, List[str]] = None , _lowerCAmelCase : str = None , _lowerCAmelCase : Any = dataclasses.MISSING , _lowerCAmelCase : Callable[[], Any] = dataclasses.MISSING , _lowerCAmelCase : dict = None , **_lowerCAmelCase : Optional[int] , ):
'''simple docstring'''
if metadata is None:
        # Important: don't use a dict as a default param in the function signature because dicts are mutable and shared across function calls
lowercase__ : List[str] = {}
if aliases is not None:
lowercase__ : Optional[int] = aliases
if help is not None:
lowercase__ : Tuple = help
return dataclasses.field(metadata=_lowerCAmelCase , default=_lowerCAmelCase , default_factory=_lowerCAmelCase , **_lowerCAmelCase )
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : Iterable[DataClassType]
def __init__( self , a , **a ) -> Optional[int]:
# To make the default appear when using --help
if "formatter_class" not in kwargs:
lowercase__ : int = ArgumentDefaultsHelpFormatter
super().__init__(**a )
if dataclasses.is_dataclass(a ):
lowercase__ : Optional[int] = [dataclass_types]
lowercase__ : Dict = list(a )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(a )
@staticmethod
def _UpperCAmelCase ( a , a ) -> Optional[int]:
lowercase__ : Union[str, Any] = f"""--{field.name}"""
lowercase__ : int = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , a ):
raise RuntimeError(
                'Unresolved type detected; type resolution should have been performed '
                'with the help of the `typing.get_type_hints` method by default' )
lowercase__ : Dict = kwargs.pop('aliases' , [] )
if isinstance(a , a ):
lowercase__ : str = [aliases]
lowercase__ : List[str] = getattr(field.type , '__origin__' , field.type )
if origin_type is Union or (hasattr(a , 'UnionType' ) and isinstance(a , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(a ) not in field.type.__args__
):
raise ValueError(
'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'
' the argument parser only supports one type per argument.'
f""" Problem encountered in field '{field.name}'.""" )
if type(a ) not in field.type.__args__:
# filter `str` in Union
lowercase__ : List[Any] = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
lowercase__ : Optional[int] = getattr(field.type , '__origin__' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
lowercase__ : Dict = (
field.type.__args__[0] if isinstance(a , field.type.__args__[1] ) else field.type.__args__[1]
)
lowercase__ : Union[str, Any] = getattr(field.type , '__origin__' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
lowercase__ : str = {}
if origin_type is Literal or (isinstance(field.type , a ) and issubclass(field.type , a )):
if origin_type is Literal:
lowercase__ : Dict = field.type.__args__
else:
lowercase__ : str = [x.value for x in field.type]
lowercase__ : str = make_choice_type_function(kwargs['choices'] )
if field.default is not dataclasses.MISSING:
lowercase__ : Tuple = field.default
else:
lowercase__ : List[Any] = True
elif field.type is bool or field.type == Optional[bool]:
            # Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
lowercase__ : Tuple = copy(a )
# Hack because type=bool in argparse does not behave as we want.
lowercase__ : Any = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # The default value is False if no default is provided for a field of type bool.
lowercase__ : List[str] = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
lowercase__ : str = default
# This tells argparse we accept 0 or 1 value after --field_name
lowercase__ : Dict = '?'
# This is the value that will get picked if we do --field_name (without value)
lowercase__ : str = True
elif isclass(a ) and issubclass(a , a ):
lowercase__ : List[str] = field.type.__args__[0]
lowercase__ : Optional[int] = '+'
if field.default_factory is not dataclasses.MISSING:
lowercase__ : List[str] = field.default_factory()
elif field.default is dataclasses.MISSING:
lowercase__ : List[Any] = True
else:
lowercase__ : Optional[int] = field.type
if field.default is not dataclasses.MISSING:
lowercase__ : List[str] = field.default
elif field.default_factory is not dataclasses.MISSING:
lowercase__ : int = field.default_factory()
else:
lowercase__ : List[Any] = True
parser.add_argument(a , *a , **a )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
lowercase__ : Any = False
parser.add_argument(f"""--no_{field.name}""" , action='store_false' , dest=field.name , **a )
def _UpperCAmelCase ( self , a ) -> int:
if hasattr(a , '_argument_group_name' ):
lowercase__ : str = self.add_argument_group(dtype._argument_group_name )
else:
lowercase__ : List[str] = self
try:
lowercase__ : Dict[str, type] = get_type_hints(a )
except NameError:
raise RuntimeError(
f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
                'removing the line `from __future__ import annotations`, which opts in to Postponed '
                'Evaluation of Annotations (PEP 563)' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 1_0) and "unsupported operand type(s) for |" in str(a ):
lowercase__ : int = '.'.join(map(a , sys.version_info[:3] ) )
raise RuntimeError(
f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
                    'the line `from __future__ import annotations`, which opts in to union types as '
                    '`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '
                    'support Python versions lower than 3.10, use '
'`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '
'`X | None`.' ) from ex
raise
for field in dataclasses.fields(a ):
if not field.init:
continue
lowercase__ : Optional[int] = type_hints[field.name]
self._parse_dataclass_field(a , a )
def _UpperCAmelCase ( self , a=None , a=False , a=True , a=None , a=None , ) -> Tuple[DataClass, ...]:
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
lowercase__ : Any = []
if args_filename:
args_files.append(Path(a ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('.args' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
lowercase__ : Any = ArgumentParser()
args_file_parser.add_argument(a , type=a , action='append' )
# Use only remaining args for further parsing (remove the args_file_flag)
lowercase__ , lowercase__ : Optional[Any] = args_file_parser.parse_known_args(args=a )
lowercase__ : int = vars(a ).get(args_file_flag.lstrip('-' ) , a )
if cmd_args_file_paths:
args_files.extend([Path(a ) for p in cmd_args_file_paths] )
lowercase__ : Optional[Any] = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
lowercase__ : Optional[Any] = file_args + args if args is not None else file_args + sys.argv[1:]
lowercase__ , lowercase__ : Optional[int] = self.parse_known_args(args=a )
lowercase__ : List[str] = []
for dtype in self.dataclass_types:
lowercase__ : Union[str, Any] = {f.name for f in dataclasses.fields(a ) if f.init}
lowercase__ : Tuple = {k: v for k, v in vars(a ).items() if k in keys}
for k in keys:
delattr(a , a )
lowercase__ : Dict = dtype(**a )
outputs.append(a )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(a )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
return (*outputs,)
def _UpperCAmelCase ( self , a , a = False ) -> Tuple[DataClass, ...]:
lowercase__ : Dict = set(args.keys() )
lowercase__ : Dict = []
for dtype in self.dataclass_types:
lowercase__ : int = {f.name for f in dataclasses.fields(a ) if f.init}
lowercase__ : Any = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
lowercase__ : Any = dtype(**a )
outputs.append(a )
if not allow_extra_keys and unused_keys:
raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(a )}""" )
return tuple(a )
def _UpperCAmelCase ( self , a , a = False ) -> Tuple[DataClass, ...]:
with open(Path(a ) , encoding='utf-8' ) as open_json_file:
lowercase__ : Optional[Any] = json.loads(open_json_file.read() )
lowercase__ : Optional[Any] = self.parse_dict(a , allow_extra_keys=a )
return tuple(a )
def _UpperCAmelCase ( self , a , a = False ) -> Tuple[DataClass, ...]:
lowercase__ : Optional[Any] = self.parse_dict(yaml.safe_load(Path(a ).read_text() ) , allow_extra_keys=a )
return tuple(a )
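# A minimal usage sketch (an addition): in upstream transformers this class is
# HfArgumentParser and the Tuple[DataClass, ...] parsing method above is
# parse_args_into_dataclasses. The dataclass and values below are hypothetical.
def _example_hf_argument_parser():
    import dataclasses
    from transformers import HfArgumentParser
    @dataclasses.dataclass
    class TrainingArgs:
        learning_rate: float = 1e-4
        do_eval: bool = False  # a `--no_*` complement flag is only generated when the default is True
    parser = HfArgumentParser(TrainingArgs)
    (training_args,) = parser.parse_args_into_dataclasses(
        args=['--learning_rate', '3e-5', '--do_eval'])
    return training_args  # TrainingArgs(learning_rate=3e-05, do_eval=True)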
| 645
|
"""simple docstring"""
from __future__ import annotations
def a_ ( _lowerCAmelCase : float , _lowerCAmelCase : float , _lowerCAmelCase : float , ):
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
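# Worked examples for the solver above (an addition). It encodes the
# shear-stress relation tau = F / A; assuming its original signature
# (stress, tangential_force, area), with the unknown quantity passed as 0:
#   (25, 100, 0)   -> ('area', 4.0)               # area = force / stress
#   (0, 1600, 200) -> ('stress', 8.0)             # stress = force / area
#   (50, 0, 20)    -> ('tangential_force', 1000)  # force = stress * area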
| 645
| 1
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : Tuple = "SpeechT5FeatureExtractor"
lowerCamelCase__ : Optional[Any] = "SpeechT5Tokenizer"
def __init__( self , a , a ) -> str:
super().__init__(a , a )
def __call__( self , *a , **a ) -> Optional[Any]:
lowercase__ : Optional[Any] = kwargs.pop('audio' , a )
lowercase__ : Optional[Any] = kwargs.pop('text' , a )
lowercase__ : Optional[Any] = kwargs.pop('text_target' , a )
lowercase__ : int = kwargs.pop('audio_target' , a )
lowercase__ : Optional[Any] = kwargs.pop('sampling_rate' , a )
if audio is not None and text is not None:
raise ValueError(
'Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?' )
if audio_target is not None and text_target is not None:
raise ValueError(
'Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?' )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.' )
if audio is not None:
lowercase__ : List[str] = self.feature_extractor(a , *a , sampling_rate=a , **a )
elif text is not None:
lowercase__ : int = self.tokenizer(a , **a )
else:
lowercase__ : Dict = None
if audio_target is not None:
lowercase__ : List[Any] = self.feature_extractor(audio_target=a , *a , sampling_rate=a , **a )
lowercase__ : Dict = targets['input_values']
elif text_target is not None:
lowercase__ : List[Any] = self.tokenizer(a , **a )
lowercase__ : List[Any] = targets['input_ids']
else:
lowercase__ : Tuple = None
if inputs is None:
return targets
if targets is not None:
lowercase__ : Optional[int] = labels
lowercase__ : Union[str, Any] = targets.get('attention_mask' )
if decoder_attention_mask is not None:
lowercase__ : Dict = decoder_attention_mask
return inputs
def _UpperCAmelCase ( self , *a , **a ) -> Optional[int]:
lowercase__ : Any = kwargs.pop('input_values' , a )
lowercase__ : Union[str, Any] = kwargs.pop('input_ids' , a )
lowercase__ : Union[str, Any] = kwargs.pop('labels' , a )
if input_values is not None and input_ids is not None:
raise ValueError('Cannot process both `input_values` and `input_ids` inputs.' )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.' )
if input_values is not None:
lowercase__ : Optional[Any] = self.feature_extractor.pad(a , *a , **a )
elif input_ids is not None:
lowercase__ : Optional[int] = self.tokenizer.pad(a , **a )
else:
lowercase__ : Dict = None
if labels is not None:
if "input_ids" in labels or (isinstance(a , a ) and "input_ids" in labels[0]):
lowercase__ : Union[str, Any] = self.tokenizer.pad(a , **a )
lowercase__ : str = targets['input_ids']
else:
lowercase__ : List[str] = self.feature_extractor.feature_size
lowercase__ : int = self.feature_extractor.num_mel_bins
lowercase__ : Union[str, Any] = self.feature_extractor.pad(a , *a , **a )
lowercase__ : Optional[Any] = feature_size_hack
lowercase__ : Optional[int] = targets['input_values']
else:
lowercase__ : str = None
if inputs is None:
return targets
if targets is not None:
lowercase__ : Optional[int] = labels
lowercase__ : str = targets.get('attention_mask' )
if decoder_attention_mask is not None:
lowercase__ : str = decoder_attention_mask
return inputs
def _UpperCAmelCase ( self , *a , **a ) -> Any:
return self.tokenizer.batch_decode(*a , **a )
def _UpperCAmelCase ( self , *a , **a ) -> Optional[int]:
return self.tokenizer.decode(*a , **a )
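# A minimal usage sketch (an addition): preparing TTS inputs through the
# tokenizer branch of __call__ above, using the public transformers API.
def _example_speecht5_processor():
    from transformers import SpeechT5Processor
    processor = SpeechT5Processor.from_pretrained('microsoft/speecht5_tts')
    inputs = processor(text='Hello, my dog is cute.', return_tensors='pt')
    return inputs['input_ids'].shape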
| 645
|
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=[3_0, 3_0] , a=2 , a=3 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=3 , a=None , a=8 , a=1_0 , ) -> Any:
lowercase__ : List[str] = parent
lowercase__ : Optional[Any] = batch_size
lowercase__ : Optional[int] = image_size
lowercase__ : List[Any] = patch_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : str = is_training
lowercase__ : Optional[Any] = use_labels
lowercase__ : Optional[Any] = hidden_size
lowercase__ : Dict = num_hidden_layers
lowercase__ : Optional[Any] = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : List[Any] = hidden_act
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : Any = attention_probs_dropout_prob
lowercase__ : Any = type_sequence_label_size
lowercase__ : Dict = initializer_range
lowercase__ : Union[str, Any] = num_labels
lowercase__ : Tuple = scope
lowercase__ : Tuple = n_targets
lowercase__ : Optional[int] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
lowercase__ : Optional[Any] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
lowercase__ : Tuple = num_patches + 1 + self.num_detection_tokens
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowercase__ : Tuple = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowercase__ : int = []
for i in range(self.batch_size ):
lowercase__ : Optional[Any] = {}
lowercase__ : Any = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=a )
lowercase__ : List[str] = torch.rand(self.n_targets , 4 , device=a )
labels.append(a )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> List[Any]:
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def _UpperCAmelCase ( self , a , a , a ) -> int:
lowercase__ : List[str] = YolosModel(config=a )
model.to(a )
model.eval()
lowercase__ : List[Any] = model(a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a ) -> Union[str, Any]:
lowercase__ : str = YolosForObjectDetection(a )
model.to(a )
model.eval()
lowercase__ : Dict = model(pixel_values=a )
lowercase__ : Tuple = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
lowercase__ : str = model(pixel_values=a , labels=a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : int = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Any = config_and_inputs
lowercase__ : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Optional[int] = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCamelCase__ : List[str] = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
lowerCamelCase__ : List[Any] = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : Union[str, Any] = False
def _UpperCAmelCase ( self , a , a , a=False ) -> Dict:
lowercase__ : List[str] = super()._prepare_for_class(a , a , return_labels=a )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
lowercase__ : Optional[Any] = []
for i in range(self.model_tester.batch_size ):
lowercase__ : Dict = {}
lowercase__ : Dict = torch.ones(
size=(self.model_tester.n_targets,) , device=a , dtype=torch.long )
lowercase__ : Optional[Any] = torch.ones(
self.model_tester.n_targets , 4 , device=a , dtype=torch.float )
labels.append(a )
lowercase__ : Union[str, Any] = labels
return inputs_dict
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Dict = YolosModelTester(self )
lowercase__ : Optional[int] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 )
def _UpperCAmelCase ( self ) -> str:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
# YOLOS does not use inputs_embeds
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : int = model_class(a )
lowercase__ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Tuple = [*signature.parameters.keys()]
lowercase__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Dict = True
# in YOLOS, the seq_len is different
lowercase__ : Tuple = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = True
lowercase__ : str = False
lowercase__ : str = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Any = model(**self._prepare_for_class(a , a ) )
lowercase__ : str = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
            # check that output_attentions also works using the config
del inputs_dict["output_attentions"]
lowercase__ : Optional[int] = True
lowercase__ : List[Any] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : List[str] = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowercase__ : Dict = len(a )
# Check attention is always last and order is fine
lowercase__ : Any = True
lowercase__ : int = True
lowercase__ : int = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Any = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[Any] = 1
self.assertEqual(out_len + added_hidden_states , len(a ) )
lowercase__ : Tuple = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _UpperCAmelCase ( self ) -> List[str]:
def check_hidden_states_output(a , a , a ):
lowercase__ : str = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : int = model(**self._prepare_for_class(a , a ) )
lowercase__ : int = outputs.hidden_states
lowercase__ : Any = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(a ) , a )
# YOLOS has a different seq_length
lowercase__ : Optional[int] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Any = True
check_hidden_states_output(a , a , a )
            # check that output_hidden_states also works using the config
del inputs_dict["output_hidden_states"]
lowercase__ : List[Any] = True
check_hidden_states_output(a , a , a )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*a )
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : int = YolosModel.from_pretrained(a )
self.assertIsNotNone(a )
def a_ ( ):
'''simple docstring'''
lowercase__ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return AutoImageProcessor.from_pretrained('hustvl/yolos-small' ) if is_vision_available() else None
@slow
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = YolosForObjectDetection.from_pretrained('hustvl/yolos-small' ).to(a )
lowercase__ : Tuple = self.default_image_processor
lowercase__ : Optional[int] = prepare_img()
lowercase__ : int = image_processor(images=a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : int = model(inputs.pixel_values )
# verify outputs
lowercase__ : Tuple = torch.Size((1, 1_0_0, 9_2) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ : Any = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] , device=a , )
lowercase__ : List[str] = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] , device=a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , a , atol=1e-4 ) )
# verify postprocessing
lowercase__ : Optional[Any] = image_processor.post_process_object_detection(
a , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
lowercase__ : str = torch.tensor([0.9_994, 0.9_790, 0.9_964, 0.9_972, 0.9_861] ).to(a )
lowercase__ : Any = [7_5, 7_5, 1_7, 6_3, 1_7]
lowercase__ : Optional[int] = torch.tensor([335.0_609, 79.3_848, 375.4_216, 187.2_495] ).to(a )
self.assertEqual(len(results['scores'] ) , 5 )
self.assertTrue(torch.allclose(results['scores'] , a , atol=1e-4 ) )
self.assertSequenceEqual(results['labels'].tolist() , a )
self.assertTrue(torch.allclose(results['boxes'][0, :] , a ) )
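# A minimal usage sketch (an addition): the detection + post-processing flow
# from the integration test above; `image` is assumed to be a PIL.Image.
def _example_yolos_detection(image):
    import torch
    from transformers import AutoImageProcessor, YolosForObjectDetection
    processor = AutoImageProcessor.from_pretrained('hustvl/yolos-small')
    model = YolosForObjectDetection.from_pretrained('hustvl/yolos-small')
    inputs = processor(images=image, return_tensors='pt')
    with torch.no_grad():
        outputs = model(**inputs)
    # keep detections above a 0.3 score, rescaled to the original image size
    return processor.post_process_object_detection(
        outputs, threshold=0.3, target_sizes=[image.size[::-1]])[0]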
| 645
| 1
|
"""simple docstring"""
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_UpperCamelCase : int = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"
_UpperCamelCase : int = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
_UpperCamelCase : Union[str, Any] = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def _UpperCAmelCase ( self ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
def _UpperCAmelCase ( self , a , a , a=None , a=True , a=False ) -> str:
if rouge_types is None:
lowercase__ : Union[str, Any] = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
lowercase__ : Tuple = rouge_scorer.RougeScorer(rouge_types=a , use_stemmer=a )
if use_aggregator:
lowercase__ : Dict = scoring.BootstrapAggregator()
else:
lowercase__ : List[str] = []
for ref, pred in zip(a , a ):
lowercase__ : Dict = scorer.score(a , a )
if use_aggregator:
aggregator.add_scores(a )
else:
scores.append(a )
if use_aggregator:
lowercase__ : List[Any] = aggregator.aggregate()
else:
lowercase__ : Tuple = {}
for key in scores[0]:
lowercase__ : List[str] = [score[key] for score in scores]
return result
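# A minimal usage sketch (an addition): per-example ROUGE scores, i.e. the
# use_aggregator=False branch of the compute method above.
def _example_rouge_per_example():
    import datasets
    rouge = datasets.load_metric('rouge')
    results = rouge.compute(
        predictions=['hello there', 'general kenobi'],
        references=['hello there', 'general kenobi'],
        rouge_types=['rouge1'],
        use_aggregator=False)
    # results['rouge1'] is a list with one Score per prediction/reference pair
    return results['rouge1']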
| 645
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
_UpperCamelCase : int = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase_ :
def __init__( self , a=False , a=False , a=6.0 , a=None , a=False , a=False , a=None , a="fp4" , a=False , **a , ) -> Tuple:
lowercase__ : str = load_in_abit
lowercase__ : str = load_in_abit
lowercase__ : List[str] = llm_inta_threshold
lowercase__ : Dict = llm_inta_skip_modules
lowercase__ : Tuple = llm_inta_enable_fpaa_cpu_offload
lowercase__ : Any = llm_inta_has_fpaa_weight
lowercase__ : Any = bnb_abit_quant_type
lowercase__ : Dict = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
lowercase__ : Dict = torch.floataa
elif isinstance(a , a ):
lowercase__ : Any = getattr(a , a )
elif isinstance(a , torch.dtype ):
lowercase__ : Any = bnb_abit_compute_dtype
else:
raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype' )
self.post_init()
def _UpperCAmelCase ( self ) -> str:
if not isinstance(self.llm_inta_threshold , a ):
raise ValueError('llm_int8_threshold must be a float' )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , a ):
raise ValueError('llm_int8_skip_modules must be a list of strings' )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , a ):
raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean' )
if not isinstance(self.llm_inta_has_fpaa_weight , a ):
raise ValueError('llm_int8_has_fp16_weight must be a boolean' )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError('bnb_4bit_compute_dtype must be torch.dtype' )
if not isinstance(self.bnb_abit_quant_type , a ):
raise ValueError('bnb_4bit_quant_type must be a string' )
if not isinstance(self.bnb_abit_use_double_quant , a ):
raise ValueError('bnb_4bit_use_double_quant must be a boolean' )
if self.load_in_abit and not version.parse(importlib.metadata.version('bitsandbytes' ) ) >= version.parse(
'0.39.0' ):
raise ValueError(
'4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version' )
def _UpperCAmelCase ( self ) -> Tuple:
return self.load_in_abit or self.load_in_abit
def _UpperCAmelCase ( self ) -> List[str]:
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def _UpperCAmelCase ( cls , a , a , **a ) -> Optional[Any]:
lowercase__ : List[Any] = cls(**a )
lowercase__ : Union[str, Any] = []
for key, value in kwargs.items():
if hasattr(a , a ):
setattr(a , a , a )
to_remove.append(a )
for key in to_remove:
kwargs.pop(a , a )
if return_unused_kwargs:
return config, kwargs
else:
return config
def _UpperCAmelCase ( self , a ) -> Dict:
with open(a , 'w' , encoding='utf-8' ) as writer:
lowercase__ : Any = self.to_dict()
lowercase__ : str = json.dumps(a , indent=2 , sort_keys=a ) + '\n'
writer.write(a )
def _UpperCAmelCase ( self ) -> Dict[str, Any]:
lowercase__ : Optional[Any] = copy.deepcopy(self.__dict__ )
lowercase__ : Any = str(output['bnb_4bit_compute_dtype'] ).split('.' )[1]
return output
def __repr__( self ) -> Dict:
return f"""{self.__class__.__name__} {self.to_json_string()}"""
def _UpperCAmelCase ( self , a = True ) -> str:
if use_diff is True:
lowercase__ : List[Any] = self.to_diff_dict()
else:
lowercase__ : List[str] = self.to_dict()
return json.dumps(a , indent=2 , sort_keys=a ) + "\n"
def _UpperCAmelCase ( self ) -> Dict[str, Any]:
lowercase__ : Tuple = self.to_dict()
# get the default config dict
lowercase__ : Optional[Any] = BitsAndBytesConfig().to_dict()
lowercase__ : int = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
lowercase__ : Optional[int] = value
return serializable_config_dict
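# A minimal usage sketch (an addition): a 4-bit NF4 setup via the upstream
# transformers.BitsAndBytesConfig, which the class above mirrors (its
# serialization helper is to_json_string upstream).
def _example_bnb_config():
    import torch
    from transformers import BitsAndBytesConfig
    config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type='nf4',
        bnb_4bit_use_double_quant=True,
        bnb_4bit_compute_dtype=torch.bfloat16)
    # with these settings the quantization-method helper above resolves to "nf4"
    return config.to_json_string()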
| 645
| 1
|
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase_ :
@staticmethod
def _UpperCAmelCase ( *a , **a ) -> int:
pass
def a_ ( _lowerCAmelCase : Image ):
'''simple docstring'''
    lowercase__ : List[str] = hashlib.md5(image.tobytes() )
    return lowercase__.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ : Union[str, Any] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def _UpperCAmelCase ( self , a , a , a ) -> Dict:
lowercase__ : Union[str, Any] = DepthEstimationPipeline(model=a , image_processor=a )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _UpperCAmelCase ( self , a , a ) -> Optional[int]:
lowercase__ : Tuple = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , a )
import datasets
lowercase__ : Tuple = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
lowercase__ : List[Any] = depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
] , a , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@slow
@require_torch
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Tuple = 'Intel/dpt-large'
lowercase__ : Optional[int] = pipeline('depth-estimation' , model=a )
lowercase__ : List[Any] = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
lowercase__ : Optional[Any] = hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )
@require_torch
def _UpperCAmelCase ( self ) -> Optional[int]:
        # It is highly irregular to have no small tests.
        self.skipTest('There is no hf-internal-testing tiny model for either GLPN or DPT' )
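# A minimal usage sketch (an addition): the slow-test flow above as a
# standalone call through the public pipeline API.
def _example_depth_estimation(url='http://images.cocodataset.org/val2017/000000039769.jpg'):
    from transformers import pipeline
    depth_estimator = pipeline('depth-estimation', model='Intel/dpt-large')
    result = depth_estimator(url)
    # result['depth'] is a PIL.Image; result['predicted_depth'] is a torch.Tensor
    return result['predicted_depth'].shape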
| 645
|
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_UpperCamelCase : int = 16
_UpperCamelCase : Union[str, Any] = 32
def a_ ( _lowerCAmelCase : Tuple ):
'''simple docstring'''
return int(x / 2**20 )
class UpperCAmelCase_ :
def __enter__( self ) -> Union[str, Any]:
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
lowercase__ : List[str] = torch.cuda.memory_allocated()
return self
def __exit__( self , *a ) -> Any:
gc.collect()
torch.cuda.empty_cache()
lowercase__ : Optional[Any] = torch.cuda.memory_allocated()
lowercase__ : Union[str, Any] = torch.cuda.max_memory_allocated()
lowercase__ : List[Any] = bamb(self.end - self.begin )
lowercase__ : List[Any] = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def a_ ( _lowerCAmelCase : Accelerator , _lowerCAmelCase : int = 16 , _lowerCAmelCase : str = "bert-base-cased" , _lowerCAmelCase : int = 320 , _lowerCAmelCase : int = 160 , ):
'''simple docstring'''
lowercase__ : List[Any] = AutoTokenizer.from_pretrained(_lowerCAmelCase )
lowercase__ : Union[str, Any] = load_dataset(
'glue' , 'mrpc' , split={'train': f"""train[:{n_train}]""", 'validation': f"""validation[:{n_val}]"""} )
def tokenize_function(_lowerCAmelCase : int ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : List[str] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase__ : Union[str, Any] = datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_lowerCAmelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ : Union[str, Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_lowerCAmelCase : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowerCAmelCase , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(_lowerCAmelCase , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
lowercase__ : Dict = DataLoader(
tokenized_datasets['train'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
lowercase__ : Dict = DataLoader(
tokenized_datasets['validation'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
return train_dataloader, eval_dataloader
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : List[str] ):
'''simple docstring'''
lowercase__ : List[Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ : Optional[int] = config['lr']
lowercase__ : Optional[Any] = int(config['num_epochs'] )
lowercase__ : Optional[Any] = int(config['seed'] )
lowercase__ : int = int(config['batch_size'] )
lowercase__ : Union[str, Any] = args.model_name_or_path
set_seed(_lowerCAmelCase )
lowercase__ , lowercase__ : Tuple = get_dataloaders(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ : List[Any] = AutoModelForSequenceClassification.from_pretrained(_lowerCAmelCase , return_dict=_lowerCAmelCase )
# Instantiate optimizer
lowercase__ : List[Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowercase__ : Optional[Any] = optimizer_cls(params=model.parameters() , lr=_lowerCAmelCase )
if accelerator.state.deepspeed_plugin is not None:
lowercase__ : Optional[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
lowercase__ : List[Any] = 1
lowercase__ : List[Any] = (len(_lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowercase__ : Optional[int] = get_linear_schedule_with_warmup(
optimizer=_lowerCAmelCase , num_warmup_steps=0 , num_training_steps=_lowerCAmelCase , )
else:
lowercase__ : Tuple = DummyScheduler(_lowerCAmelCase , total_num_steps=_lowerCAmelCase , warmup_num_steps=0 )
# Prepare everything
    # There is no specific order to remember; we just need to unpack the objects in the same order we gave them to the
    # prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = accelerator.prepare(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# We need to keep track of how many total steps we have iterated over
lowercase__ : Optional[int] = 0
    # We also need to keep track of the starting epoch so files are named properly
lowercase__ : Tuple = 0
# Now we train the model
lowercase__ : Optional[Any] = {}
for epoch in range(_lowerCAmelCase , _lowerCAmelCase ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(_lowerCAmelCase ):
lowercase__ : List[Any] = model(**_lowerCAmelCase )
lowercase__ : Dict = outputs.loss
lowercase__ : int = loss / gradient_accumulation_steps
accelerator.backward(_lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
        # Print the GPU memory usage details, such as allocated memory, peak memory, and total memory usage
        accelerator.print('Memory before entering the train loop: {}'.format(bamb(tracemalloc.begin ) ) )
        accelerator.print('Memory consumed at the end of the train loop (end-begin): {}'.format(tracemalloc.used ) )
        accelerator.print('Peak Memory consumed during the train loop (max-begin): {}'.format(tracemalloc.peaked ) )
        accelerator.print(
            'Total Peak Memory consumed during the train loop (max): {}'.format(
                tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
lowercase__ : int = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
def a_ ( ):
'''simple docstring'''
lowercase__ : int = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=_lowerCAmelCase , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_lowerCAmelCase , )
parser.add_argument(
'--output_dir' , type=_lowerCAmelCase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--peak_memory_upper_bound' , type=_lowerCAmelCase , default=_lowerCAmelCase , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , )
parser.add_argument(
'--n_train' , type=_lowerCAmelCase , default=320 , help='Number of training examples to use.' , )
parser.add_argument(
'--n_val' , type=_lowerCAmelCase , default=160 , help='Number of validation examples to use.' , )
parser.add_argument(
'--num_epochs' , type=_lowerCAmelCase , default=1 , help='Number of train epochs.' , )
lowercase__ : Any = parser.parse_args()
lowercase__ : Optional[Any] = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(_lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
main()
| 645
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : Optional[Any] = ["pixel_values"]
def __init__( self , a = True , a = None , a = PILImageResampling.BICUBIC , a = True , a = None , a = True , a = 1 / 2_5_5 , a = True , a = None , a = None , a = True , **a , ) -> None:
super().__init__(**a )
lowercase__ : Union[str, Any] = size if size is not None else {'shortest_edge': 2_2_4}
lowercase__ : Tuple = get_size_dict(a , default_to_square=a )
lowercase__ : List[str] = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
lowercase__ : Optional[int] = get_size_dict(a , default_to_square=a , param_name='crop_size' )
lowercase__ : Union[str, Any] = do_resize
lowercase__ : List[str] = size
lowercase__ : Optional[int] = resample
lowercase__ : Tuple = do_center_crop
lowercase__ : Tuple = crop_size
lowercase__ : Dict = do_rescale
lowercase__ : Any = rescale_factor
lowercase__ : Dict = do_normalize
lowercase__ : List[str] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowercase__ : Any = image_std if image_std is not None else OPENAI_CLIP_STD
lowercase__ : List[Any] = do_convert_rgb
def _UpperCAmelCase ( self , a , a , a = PILImageResampling.BICUBIC , a = None , **a , ) -> np.ndarray:
lowercase__ : Optional[Any] = get_size_dict(a , default_to_square=a )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
lowercase__ : List[Any] = get_resize_output_image_size(a , size=size['shortest_edge'] , default_to_square=a )
return resize(a , size=a , resample=a , data_format=a , **a )
def _UpperCAmelCase ( self , a , a , a = None , **a , ) -> np.ndarray:
lowercase__ : Tuple = get_size_dict(a )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(a , size=(size['height'], size['width']) , data_format=a , **a )
def _UpperCAmelCase ( self , a , a , a = None , **a , ) -> int:
return rescale(a , scale=a , data_format=a , **a )
def _UpperCAmelCase ( self , a , a , a , a = None , **a , ) -> np.ndarray:
return normalize(a , mean=a , std=a , data_format=a , **a )
def _UpperCAmelCase ( self , a , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = ChannelDimension.FIRST , **a , ) -> PIL.Image.Image:
lowercase__ : int = do_resize if do_resize is not None else self.do_resize
lowercase__ : Union[str, Any] = size if size is not None else self.size
lowercase__ : Any = get_size_dict(a , param_name='size' , default_to_square=a )
lowercase__ : str = resample if resample is not None else self.resample
lowercase__ : Any = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase__ : Optional[Any] = crop_size if crop_size is not None else self.crop_size
lowercase__ : str = get_size_dict(a , param_name='crop_size' , default_to_square=a )
lowercase__ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ : Any = do_normalize if do_normalize is not None else self.do_normalize
lowercase__ : List[Any] = image_mean if image_mean is not None else self.image_mean
lowercase__ : Union[str, Any] = image_std if image_std is not None else self.image_std
lowercase__ : str = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowercase__ : int = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowercase__ : int = [convert_to_rgb(a ) for image in images]
# All transformations expect numpy arrays.
lowercase__ : Optional[Any] = [to_numpy_array(a ) for image in images]
if do_resize:
lowercase__ : Any = [self.resize(image=a , size=a , resample=a ) for image in images]
if do_center_crop:
lowercase__ : Optional[Any] = [self.center_crop(image=a , size=a ) for image in images]
if do_rescale:
lowercase__ : Dict = [self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
lowercase__ : List[Any] = [self.normalize(image=a , mean=a , std=a ) for image in images]
lowercase__ : str = [to_channel_dimension_format(a , a ) for image in images]
lowercase__ : Tuple = {'pixel_values': images}
return BatchFeature(data=a , tensor_type=a )
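# A minimal usage sketch (hypothetical: it assumes the methods above keep their
# original names `resize`, `center_crop`, `rescale`, `normalize` and `preprocess`,
# for which the obfuscated `_UpperCAmelCase` stubs stand in):
#
#   import numpy as np
#   processor = UpperCAmelCase_()  # defaults above: shortest_edge=224, 224x224 crop
#   image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
#   batch = processor.preprocess(image, return_tensors="np")
#   batch["pixel_values"].shape  # -> (1, 3, 224, 224)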
| 645
|
"""simple docstring"""
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : Any = [0] * len(_lowerCAmelCase )
for i in range(1 , len(_lowerCAmelCase ) ):
# use last results for better performance - dynamic programming
lowercase__ : List[str] = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
lowercase__ : Dict = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
lowercase__ : Union[str, Any] = j
return prefix_result
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
return max(prefix_function(_lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
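# A worked example (assuming the two helpers above keep their original names
# `prefix_function` and `longest_prefix`):
#
#   prefix_function("aabcdaabc")  # -> [0, 1, 0, 0, 0, 1, 2, 3, 4]
#   longest_prefix("aabcdaabc")   # -> 4
#
# prefix_result[i] is the length of the longest proper prefix of
# input_string[: i + 1] that is also a suffix of it (the KMP failure function).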
| 645
| 1
|
"""simple docstring"""
from collections.abc import Sequence
def a_ ( _lowerCAmelCase : Sequence[float] , _lowerCAmelCase : float ):
'''simple docstring'''
return sum(c * (x**i) for i, c in enumerate(_lowerCAmelCase ) )
def a_ ( _lowerCAmelCase : Sequence[float] , _lowerCAmelCase : float ):
'''simple docstring'''
lowercase__ : int = 0.0
for coeff in reversed(_lowerCAmelCase ):
lowercase__ : List[Any] = result * x + coeff
return result
if __name__ == "__main__":
_UpperCamelCase : int = (0.0, 0.0, 5.0, 9.3, 7.0)
_UpperCamelCase : Dict = 1_0.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
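# Worked check for the values above, poly = (0.0, 0.0, 5.0, 9.3, 7.0) at x = 10.0:
#   evaluate_poly: 5.0 * 10**2 + 9.3 * 10**3 + 7.0 * 10**4 = 500 + 9300 + 70000 = 79800.0
#   horner: ((((0 * 10 + 7.0) * 10 + 9.3) * 10 + 5.0) * 10 + 0.0) * 10 + 0.0 = 79800.0
# Horner's rule reaches the same value with one multiply-add per coefficient,
# instead of computing each power of x separately.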
| 645
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self , a , a=7 , a=3 , a=1_8 , a=3_0 , a=4_0_0 , a=True , a=None , a=True , a=None , a=True , ) -> List[str]:
lowercase__ : Tuple = size if size is not None else {'shortest_edge': 2_0}
lowercase__ : Union[str, Any] = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8}
lowercase__ : Optional[int] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : str = num_channels
lowercase__ : Any = image_size
lowercase__ : Optional[Any] = min_resolution
lowercase__ : int = max_resolution
lowercase__ : List[Any] = do_resize
lowercase__ : List[str] = size
lowercase__ : str = do_center_crop
lowercase__ : List[Any] = crop_size
lowercase__ : Union[str, Any] = do_flip_channel_order
def _UpperCAmelCase ( self ) -> int:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Optional[Any] = MobileViTImageProcessor if is_vision_available() else None
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Tuple = MobileViTImageProcessingTester(self )
@property
def _UpperCAmelCase ( self ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , 'do_resize' ) )
self.assertTrue(hasattr(a , 'size' ) )
self.assertTrue(hasattr(a , 'do_center_crop' ) )
self.assertTrue(hasattr(a , 'center_crop' ) )
self.assertTrue(hasattr(a , 'do_flip_channel_order' ) )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 2_0} )
self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8} )
lowercase__ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {'shortest_edge': 4_2} )
self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4} )
def _UpperCAmelCase ( self ) -> Tuple:
pass
def _UpperCAmelCase ( self ) -> str:
# Initialize image_processing
lowercase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : List[Any] = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _UpperCAmelCase ( self ) -> Tuple:
# Initialize image_processing
lowercase__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
lowercase__ : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : Any = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _UpperCAmelCase ( self ) -> Dict:
# Initialize image_processing
lowercase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : Tuple = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 645
| 1
|
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> int:
lowercase__ : str = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowercase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(a )
lowercase__ : List[Any] = -1
lowercase__ : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(a )
lowercase__ : int = model.generate(a , max_new_tokens=1_0 , do_sample=a )
lowercase__ : List[str] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowercase__ : Optional[int] = TextStreamer(a )
model.generate(a , max_new_tokens=1_0 , do_sample=a , streamer=a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowercase__ : List[str] = cs.out[:-1]
self.assertEqual(a , a )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Dict = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowercase__ : Any = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(a )
lowercase__ : List[str] = -1
lowercase__ : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(a )
lowercase__ : int = model.generate(a , max_new_tokens=1_0 , do_sample=a )
lowercase__ : List[str] = tokenizer.decode(greedy_ids[0] )
lowercase__ : List[str] = TextIteratorStreamer(a )
lowercase__ : List[Any] = {'input_ids': input_ids, 'max_new_tokens': 1_0, 'do_sample': False, 'streamer': streamer}
lowercase__ : Any = Thread(target=model.generate , kwargs=a )
thread.start()
lowercase__ : Any = ''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(a , a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : int = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowercase__ : Tuple = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(a )
lowercase__ : Dict = -1
lowercase__ : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(a )
lowercase__ : Any = model.generate(a , max_new_tokens=1_0 , do_sample=a )
lowercase__ : Dict = greedy_ids[:, input_ids.shape[1] :]
lowercase__ : Union[str, Any] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowercase__ : Dict = TextStreamer(a , skip_prompt=a )
model.generate(a , max_new_tokens=1_0 , do_sample=a , streamer=a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowercase__ : List[str] = cs.out[:-1]
self.assertEqual(a , a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowercase__ : Optional[Any] = AutoTokenizer.from_pretrained('distilgpt2' )
lowercase__ : List[Any] = AutoModelForCausalLM.from_pretrained('distilgpt2' ).to(a )
lowercase__ : Optional[Any] = -1
lowercase__ : List[Any] = torch.ones((1, 5) , device=a ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowercase__ : str = TextStreamer(a , skip_special_tokens=a )
model.generate(a , max_new_tokens=1 , do_sample=a , streamer=a )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowercase__ : Optional[int] = cs.out[:-1] # Remove the final "\n"
lowercase__ : Optional[Any] = tokenizer(a , return_tensors='pt' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : List[str] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowercase__ : Optional[int] = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(a )
lowercase__ : List[str] = -1
lowercase__ : Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(a )
lowercase__ : List[Any] = TextIteratorStreamer(a , timeout=0.001 )
lowercase__ : Dict = {'input_ids': input_ids, 'max_new_tokens': 1_0, 'do_sample': False, 'streamer': streamer}
lowercase__ : Tuple = Thread(target=model.generate , kwargs=a )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(a ):
lowercase__ : str = ''
for new_text in streamer:
streamer_text += new_text
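# A minimal sketch of the streaming pattern exercised above (this is the real
# `transformers` API; the tiny-random model is just the test fixture):
#
#   streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, timeout=60.0)
#   thread = Thread(target=model.generate,
#                   kwargs={"input_ids": input_ids, "max_new_tokens": 10, "streamer": streamer})
#   thread.start()
#   for new_text in streamer:  # yields decoded text chunks as generation progresses
#       print(new_text, end="")
#   thread.join()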
| 645
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=True , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=4 , ) -> Dict:
lowercase__ : Optional[Any] = parent
lowercase__ : Dict = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : int = is_training
lowercase__ : str = use_attention_mask
lowercase__ : Dict = use_token_type_ids
lowercase__ : Optional[int] = use_labels
lowercase__ : Tuple = vocab_size
lowercase__ : List[str] = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : int = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : List[str] = hidden_act
lowercase__ : Dict = hidden_dropout_prob
lowercase__ : Tuple = attention_probs_dropout_prob
lowercase__ : List[str] = max_position_embeddings
lowercase__ : int = type_vocab_size
lowercase__ : List[str] = type_sequence_label_size
lowercase__ : Union[str, Any] = initializer_range
lowercase__ : Optional[int] = num_choices
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = None
if self.use_attention_mask:
lowercase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : List[str] = None
if self.use_token_type_ids:
lowercase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ : Any = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Optional[int] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs
lowercase__ : Union[str, Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Tuple = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Union[str, Any] = FlaxAlbertModelTester(self )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_class_name in self.all_model_classes:
lowercase__ : str = model_class_name.from_pretrained('albert-base-v2' )
lowercase__ : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(a )
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : str = FlaxAlbertModel.from_pretrained('albert-base-v2' )
lowercase__ : Optional[int] = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowercase__ : Optional[Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowercase__ : Any = model(a , attention_mask=a )[0]
lowercase__ : Tuple = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , a )
lowercase__ : Optional[Any] = np.array(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
| 645
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_UpperCamelCase : Optional[Any] = {
"configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Optional[int] = [
"LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongT5EncoderModel",
"LongT5ForConditionalGeneration",
"LongT5Model",
"LongT5PreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : List[Any] = [
"FlaxLongT5ForConditionalGeneration",
"FlaxLongT5Model",
"FlaxLongT5PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
_UpperCamelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
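# Note: in the original (pre-obfuscation) __init__.py the dict and the two lists
# above all populate a single `_import_structure` mapping, e.g.
#   _import_structure["modeling_longt5"] = ["LongT5Model", ...]
# and the last line is `sys.modules[__name__] = _LazyModule(...)`, which defers
# the heavy torch/flax imports until one of the exported names is first accessed.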
| 645
|
"""simple docstring"""
from collections.abc import Sequence
def a_ ( _lowerCAmelCase : Sequence[float] , _lowerCAmelCase : float ):
'''simple docstring'''
return sum(c * (x**i) for i, c in enumerate(_lowerCAmelCase ) )
def a_ ( _lowerCAmelCase : Sequence[float] , _lowerCAmelCase : float ):
'''simple docstring'''
lowercase__ : int = 0.0
for coeff in reversed(_lowerCAmelCase ):
lowercase__ : List[Any] = result * x + coeff
return result
if __name__ == "__main__":
_UpperCamelCase : int = (0.0, 0.0, 5.0, 9.3, 7.0)
_UpperCamelCase : Dict = 1_0.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 645
| 1
|
"""simple docstring"""
from __future__ import annotations
def a_ ( _lowerCAmelCase : list[float] , _lowerCAmelCase : list[float] ):
'''simple docstring'''
lowercase__ : Any = sorted(numsa + numsa )
lowercase__ , lowercase__ : int = divmod(len(_lowerCAmelCase ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
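# Worked example: with inputs [1.0, 3.0] and [2.0, 4.0] the combined sorted list
# is [1.0, 2.0, 3.0, 4.0]; divmod(4, 2) = (2, 0), so the median is
# (all_numbers[2] + all_numbers[1]) / 2 = 2.5. With an odd total, e.g. [1.0] and
# [2.0, 3.0], divmod(3, 2) = (1, 1) and the median is all_numbers[1] = 2.0.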
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCamelCase : Tuple = [float(x) for x in input("Enter the elements of first array: ").split()]
_UpperCamelCase : Optional[int] = [float(x) for x in input("Enter the elements of second array: ").split()]
print(f'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
| 645
|
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
_UpperCamelCase : Any = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def a_ ( _lowerCAmelCase : Optional[Any]=True ):
'''simple docstring'''
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_a))
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : str = None
lowerCamelCase__ : Optional[Any] = None
def _UpperCAmelCase ( self , a , a ) -> List[Any]:
with TemporaryDirectory() as tmp_dir:
lowercase__ : List[str] = dataset_module_factory(a , cache_dir=a )
lowercase__ : List[Any] = import_main_class(dataset_module.module_path , dataset=a )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=a , config_name=a , hash=dataset_module.hash , )
lowercase__ : Union[str, Any] = '/'.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=a ).replace(os.sep , '/' ),
config.DATASET_INFO_FILENAME,
] )
lowercase__ : Union[str, Any] = cached_path(a , cache_dir=a )
self.assertTrue(os.path.exists(a ) )
@pytest.mark.integration
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : Union[str, Any] = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple'
lowercase__ : int = dataset_module_factory('wikipedia' , cache_dir=_lowerCAmelCase )
lowercase__ : Optional[int] = import_main_class(dataset_module.module_path )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=_lowerCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
lowercase__ : Optional[int] = None
builder_instance.download_and_prepare()
lowercase__ : Optional[int] = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def a_ ( _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Optional[int] = dataset_module_factory('wikipedia' , cache_dir=_lowerCAmelCase )
lowercase__ : List[str] = import_main_class(dataset_module.module_path , dataset=_lowerCAmelCase )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=_lowerCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
lowercase__ : Union[str, Any] = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert "train" in ds
assert isinstance(ds['train'] , _lowerCAmelCase )
assert next(iter(ds['train'] ) )
| 645
| 1
|
"""simple docstring"""
from sklearn.metrics import fa_score
import datasets
_UpperCamelCase : int = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
_UpperCamelCase : Tuple = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
_UpperCamelCase : Optional[int] = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def _UpperCAmelCase ( self ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] , )
def _UpperCAmelCase ( self , a , a , a=None , a=1 , a="binary" , a=None ) -> Dict:
lowercase__ : Any = fa_score(
a , a , labels=a , pos_label=a , average=a , sample_weight=a )
return {"f1": float(a ) if score.size == 1 else score}
| 645
|
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def a_ ( _lowerCAmelCase : dict ):
'''simple docstring'''
return (data["data"], data["target"])
def a_ ( _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray ):
'''simple docstring'''
lowercase__ : Any = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(_lowerCAmelCase , _lowerCAmelCase )
# Predict target for test data
lowercase__ : str = xgb.predict(_lowerCAmelCase )
lowercase__ : Union[str, Any] = predictions.reshape(len(_lowerCAmelCase ) , 1 )
return predictions
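# For n test rows, XGBRegressor.predict returns a 1-D array of shape (n,); the
# reshape above turns it into an (n, 1) column vector before main() compares it
# against the test targets with the error metrics below.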
def a_ ( ):
'''simple docstring'''
lowercase__ : Optional[Any] = fetch_california_housing()
lowercase__ , lowercase__ : str = data_handling(_lowerCAmelCase )
lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = train_test_split(
_lowerCAmelCase , _lowerCAmelCase , test_size=0.2_5 , random_state=1 )
lowercase__ : Any = xgboost(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Error printing
print(f"""Mean Absolute Error : {mean_absolute_error(_lowerCAmelCase , _lowerCAmelCase )}""" )
print(f"""Mean Square Error : {mean_squared_error(_lowerCAmelCase , _lowerCAmelCase )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 645
| 1
|
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def a_ ( ):
'''simple docstring'''
lowercase__ : Union[str, Any] = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
lowercase__ : str = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw ).convert('RGB' )
return image
def a_ ( _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Any = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.weight""", f"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.bias""", f"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.weight""", f"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.bias""", f"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.qkv.weight""", f"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.weight""", f"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.bias""", f"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def a_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] ):
'''simple docstring'''
lowercase__ : Optional[int] = dct.pop(_lowerCAmelCase )
lowercase__ : Optional[int] = val
def a_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict ):
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
lowercase__ : Dict = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.q_bias""" )
lowercase__ : Optional[int] = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.v_bias""" )
# next, set bias in the state dict
lowercase__ : List[Any] = torch.cat((q_bias, torch.zeros_like(_lowerCAmelCase , requires_grad=_lowerCAmelCase ), v_bias) )
lowercase__ : Optional[int] = qkv_bias
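# The fused qkv projection in this vision encoder has no learnable key bias
# (EVA/BEiT-style attention), so the new bias is the concatenation
# [q_bias, zeros, v_bias] with shape (3 * hidden_size,).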
def a_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Optional[int] = 364 if 'coco' in model_name else 224
lowercase__ : str = BlipaVisionConfig(image_size=_lowerCAmelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
lowercase__ : Union[str, Any] = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=_lowerCAmelCase ).to_dict()
elif "opt-6.7b" in model_name:
lowercase__ : Dict = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=_lowerCAmelCase ).to_dict()
elif "t5-xl" in model_name:
lowercase__ : Any = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
lowercase__ : List[Any] = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
lowercase__ : List[str] = BlipaConfig(vision_config=_lowerCAmelCase , text_config=_lowerCAmelCase )
return config, image_size
@torch.no_grad()
def a_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : List[Any]=False ):
'''simple docstring'''
lowercase__ : int = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
lowercase__ : Any = tokenizer('\n' , add_special_tokens=_lowerCAmelCase ).input_ids[0]
lowercase__ , lowercase__ : Tuple = get_blipa_config(_lowerCAmelCase , eos_token_id=_lowerCAmelCase )
lowercase__ : Optional[int] = BlipaForConditionalGeneration(_lowerCAmelCase ).eval()
lowercase__ : List[Any] = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
lowercase__ , lowercase__ : int = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
lowercase__ : List[str] = 'cuda' if torch.cuda.is_available() else 'cpu'
lowercase__ , lowercase__ , lowercase__ : Dict = load_model_and_preprocess(
name=_lowerCAmelCase , model_type=_lowerCAmelCase , is_eval=_lowerCAmelCase , device=_lowerCAmelCase )
original_model.eval()
print('Done!' )
# update state dict keys
lowercase__ : Optional[int] = original_model.state_dict()
lowercase__ : int = create_rename_keys(_lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
lowercase__ : List[str] = state_dict.pop(_lowerCAmelCase )
if key.startswith('Qformer.bert' ):
lowercase__ : Any = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
lowercase__ : str = key.replace('self' , 'attention' )
if "opt_proj" in key:
lowercase__ : List[str] = key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
lowercase__ : str = key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
lowercase__ : Dict = key.replace('opt' , 'language' )
if key.startswith('t5' ):
lowercase__ : Any = key.replace('t5' , 'language' )
lowercase__ : Optional[int] = val
# read in qv biases
read_in_q_v_bias(_lowerCAmelCase , _lowerCAmelCase )
lowercase__ , lowercase__ : List[Any] = hf_model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
assert len(_lowerCAmelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
lowercase__ : str = load_demo_image()
lowercase__ : List[Any] = vis_processors['eval'](_lowerCAmelCase ).unsqueeze(0 ).to(_lowerCAmelCase )
lowercase__ : List[str] = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(_lowerCAmelCase )
# create processor
lowercase__ : Any = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=_lowerCAmelCase , image_std=_lowerCAmelCase )
lowercase__ : Any = BlipaProcessor(image_processor=_lowerCAmelCase , tokenizer=_lowerCAmelCase )
lowercase__ : Any = processor(images=_lowerCAmelCase , return_tensors='pt' ).pixel_values.to(_lowerCAmelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(_lowerCAmelCase , _lowerCAmelCase )
original_model.to(_lowerCAmelCase )
hf_model.to(_lowerCAmelCase )
with torch.no_grad():
if "opt" in model_name:
lowercase__ : List[str] = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
lowercase__ : str = hf_model(_lowerCAmelCase , _lowerCAmelCase ).logits
else:
lowercase__ : Dict = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
lowercase__ : Any = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
lowercase__ : Optional[int] = hf_model(_lowerCAmelCase , _lowerCAmelCase , labels=_lowerCAmelCase ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
lowercase__ : List[str] = torch.tensor(
[[-4_1.5_8_5_0, -4.4_4_4_0, -8.9_9_2_2], [-4_7.4_3_2_2, -5.9_1_4_3, -1.7_3_4_0]] , device=_lowerCAmelCase )
assert torch.allclose(logits[0, :3, :3] , _lowerCAmelCase , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
lowercase__ : Optional[int] = torch.tensor(
[[-5_7.0_1_0_9, -9.8_9_6_7, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] , device=_lowerCAmelCase )
else:
# cast to same type
lowercase__ : Optional[Any] = logits.dtype
assert torch.allclose(original_logits.to(_lowerCAmelCase ) , _lowerCAmelCase , atol=1E-2 )
print('Looks ok!' )
print('Generating a caption...' )
lowercase__ : int = ''
lowercase__ : Dict = tokenizer(_lowerCAmelCase , return_tensors='pt' ).input_ids.to(_lowerCAmelCase )
lowercase__ : int = original_model.generate({'image': original_pixel_values} )
lowercase__ : Optional[int] = hf_model.generate(
_lowerCAmelCase , _lowerCAmelCase , do_sample=_lowerCAmelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , _lowerCAmelCase )
lowercase__ : Optional[Any] = input_ids.shape[1]
lowercase__ : Tuple = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowerCAmelCase )
lowercase__ : Union[str, Any] = [text.strip() for text in output_text]
print('HF generation:' , _lowerCAmelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowerCAmelCase )
hf_model.save_pretrained(_lowerCAmelCase )
if push_to_hub:
processor.push_to_hub(f"""nielsr/{model_name}""" )
hf_model.push_to_hub(f"""nielsr/{model_name}""" )
if __name__ == "__main__":
_UpperCamelCase : Dict = argparse.ArgumentParser()
_UpperCamelCase : Any = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
_UpperCamelCase : Tuple = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 645
|
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=1_0 , a=3 , a=2 , a=2 , a=2 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=0.9 , a=None , ) -> Optional[Any]:
lowercase__ : str = parent
lowercase__ : int = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : Dict = patch_size
lowercase__ : Tuple = tubelet_size
lowercase__ : Optional[int] = num_frames
lowercase__ : Optional[int] = is_training
lowercase__ : int = use_labels
lowercase__ : Optional[int] = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : Optional[int] = num_attention_heads
lowercase__ : Any = intermediate_size
lowercase__ : str = hidden_act
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Union[str, Any] = type_sequence_label_size
lowercase__ : List[Any] = initializer_range
lowercase__ : str = mask_ratio
lowercase__ : Optional[Any] = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
lowercase__ : Optional[Any] = (image_size // patch_size) ** 2
lowercase__ : str = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
lowercase__ : str = int(mask_ratio * self.seq_length )
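# With the tester defaults (image_size=10, patch_size=2, num_frames=2,
# tubelet_size=2, mask_ratio=0.9): num_patches_per_frame = (10 // 2) ** 2 = 25,
# seq_length = (2 // 2) * 25 = 25, and num_masks = int(0.9 * 25) = 22.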
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : int = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowercase__ : int = None
if self.use_labels:
lowercase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Dict = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Tuple:
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]:
lowercase__ : Dict = VideoMAEModel(config=a )
model.to(a )
model.eval()
lowercase__ : Tuple = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a ) -> Union[str, Any]:
lowercase__ : str = VideoMAEForPreTraining(a )
model.to(a )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase__ : Any = torch.ones((self.num_masks,) )
lowercase__ : str = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
lowercase__ : Optional[int] = mask.expand(self.batch_size , -1 ).bool()
lowercase__ : str = model(a , a )
# model only returns predictions for masked patches
lowercase__ : str = mask.sum().item()
lowercase__ : int = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
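# With the defaults above, the expected logits shape is
# (batch_size, num_masked_patches, decoder_num_labels)
# = (13, 22, 3 * 2 * 2**2) = (13, 22, 24), since VideoMAE only predicts pixel
# values for the masked tubelets.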
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Dict = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs
lowercase__ : List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Tuple = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
lowerCamelCase__ : Optional[int] = (
{"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ : Any = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Union[str, Any] = False
lowerCamelCase__ : str = False
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Optional[Any] = VideoMAEModelTester(self )
lowercase__ : Optional[Any] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 )
def _UpperCAmelCase ( self , a , a , a=False ) -> Optional[int]:
lowercase__ : Union[str, Any] = copy.deepcopy(a )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase__ : Optional[Any] = torch.ones((self.model_tester.num_masks,) )
lowercase__ : Any = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
lowercase__ : Any = mask.expand(self.model_tester.batch_size , -1 ).bool()
lowercase__ : Union[str, Any] = bool_masked_pos.to(a )
if return_labels:
if model_class in [
*get_values(a ),
]:
lowercase__ : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a )
return inputs_dict
def _UpperCAmelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='VideoMAE does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Dict:
pass
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : int = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
lowercase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : int = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[Any] = VideoMAEModel.from_pretrained(a )
self.assertIsNotNone(a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
if not self.has_attentions:
pass
else:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : str = True
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = self.model_tester.seq_length - self.model_tester.num_masks
lowercase__ : Any = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
lowercase__ : Optional[Any] = True
lowercase__ : int = False
lowercase__ : Any = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Optional[int] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Dict = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : str = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : List[Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[Any] = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowercase__ : List[str] = len(a )
# Check attention is always last and order is fine
lowercase__ : Optional[int] = True
lowercase__ : List[str] = True
lowercase__ : int = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : List[str] = model(**self._prepare_for_class(a , a ) )
self.assertEqual(out_len + 1 , len(a ) )
lowercase__ : int = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _UpperCAmelCase ( self ) -> Optional[int]:
def check_hidden_states_output(a , a , a ):
lowercase__ : Optional[int] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[int] = outputs.hidden_states
lowercase__ : List[Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(a ) , a )
lowercase__ : Optional[Any] = self.model_tester.seq_length - self.model_tester.num_masks
lowercase__ : Union[str, Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Tuple = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Union[str, Any] = True
check_hidden_states_output(a , a , a )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
def a_ ( ):
'''simple docstring'''
lowercase__ : int = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
lowercase__ : str = np.load(_lowerCAmelCase )
return list(_lowerCAmelCase )
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def _UpperCAmelCase ( self ) -> Optional[Any]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to(
a )
lowercase__ : str = self.default_image_processor
lowercase__ : List[str] = prepare_video()
lowercase__ : int = image_processor(a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**a )
# verify the logits
lowercase__ : str = torch.Size((1, 4_0_0) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ : List[Any] = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) )
@slow
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[int] = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(a )
lowercase__ : Optional[Any] = self.default_image_processor
lowercase__ : List[str] = prepare_video()
lowercase__ : str = image_processor(a , return_tensors='pt' ).to(a )
# add boolean mask, indicating which patches to mask
lowercase__ : Union[str, Any] = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
lowercase__ : str = torch.load(a )
# forward pass
with torch.no_grad():
lowercase__ : List[Any] = model(**a )
# verify the logits
lowercase__ : Dict = torch.Size([1, 1_4_0_8, 1_5_3_6] )
lowercase__ : List[str] = torch.tensor(
[[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=a )
self.assertEqual(outputs.logits.shape , a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
lowercase__ : List[Any] = torch.tensor([0.5_142] , device=a )
self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
lowercase__ : Tuple = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=a ).to(
a )
with torch.no_grad():
lowercase__ : Any = model(**a )
        lowercase__ : List[Any] = torch.tensor([0.6_469] , device=a )
self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) )
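# --- Illustrative sketch (not part of the original test): how a boolean mask for
# VideoMAEForPreTraining is built, mirroring the _prepare_for_class logic above.
# The sizes are assumptions for a hypothetical VideoMAE-Base setup
# (16 frames, 224x224, tubelet size 2, 75% masking).
import torch

seq_length, num_masks, batch_size = 1568, 1176, 2
mask = torch.ones((num_masks,))
mask = torch.cat([mask, torch.zeros(seq_length - mask.size(0))])  # pad with visible patches
bool_masked_pos = mask.expand(batch_size, -1).bool()  # the same mask repeated per example
assert bool_masked_pos.shape == (batch_size, seq_length)
assert int(bool_masked_pos[0].sum()) == num_masks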
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER" , "False")) is not True , reason="Skipping test because it should only be run when releasing a minor transformers version" , )
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_5_0, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_0_0, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
])
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> Dict:
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='utf-8' , check=a , )
assert hasattr(self , 'env' )
def _UpperCAmelCase ( self , a=1 ) -> Any:
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-single""" , instance_count=a , instance_type=self.instance_type , debugger_hook_config=a , hyperparameters={**self.env.hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='py36' , )
def _UpperCAmelCase ( self , a ) -> Optional[int]:
TrainingJobAnalytics(a ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
def _UpperCAmelCase ( self ) -> int:
# create estimator
lowercase__ : Tuple = self.create_estimator()
# run training
estimator.fit()
# result dataframe
lowercase__ : int = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowercase__ : Any = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
lowercase__ : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
        # get train time from the SageMaker job; this includes starting, preprocessing, and stopping
lowercase__ : Optional[Any] = (
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , 'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , a )
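# --- Illustrative sketch (not from the original test): how the KPI extraction above
# filters the SageMaker metrics dataframe. The toy dataframe and thresholds below
# are invented for illustration.
import pandas as pd

result_metrics_df = pd.DataFrame(
    {
        "metric_name": ["eval_accuracy", "eval_loss", "eval_accuracy"],
        "value": [0.72, 0.81, 0.74],
    }
)
eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
assert all(acc >= 0.6 for acc in eval_accuracy)
assert all(loss <= 0.9 for loss in eval_loss)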
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_UpperCamelCase : Dict = logging.get_logger(__name__)
_UpperCamelCase : List[Any] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
_UpperCamelCase : List[str] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ):
'''simple docstring'''
for attribute in key.split('.' ):
lowercase__ : Dict = getattr(_lowerCAmelCase , _lowerCAmelCase )
if weight_type is not None:
lowercase__ : Optional[int] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape
else:
lowercase__ : Optional[int] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
lowercase__ : Optional[Any] = value
elif weight_type == "weight_g":
lowercase__ : Dict = value
elif weight_type == "weight_v":
lowercase__ : List[str] = value
elif weight_type == "bias":
lowercase__ : Optional[Any] = value
else:
lowercase__ : List[str] = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Tuple = []
lowercase__ : List[str] = fairseq_model.state_dict()
lowercase__ : Union[str, Any] = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
lowercase__ : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == 'group' , )
lowercase__ : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
lowercase__ : List[Any] = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
lowercase__ : int = True
if "*" in mapped_key:
lowercase__ : Optional[int] = name.split(_lowerCAmelCase )[0].split('.' )[-2]
lowercase__ : List[str] = mapped_key.replace('*' , _lowerCAmelCase )
if "weight_g" in name:
lowercase__ : List[Any] = 'weight_g'
elif "weight_v" in name:
lowercase__ : int = 'weight_v'
elif "bias" in name:
lowercase__ : Dict = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase__ : Union[str, Any] = 'weight'
else:
lowercase__ : int = None
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def a_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Dict ):
'''simple docstring'''
lowercase__ : int = full_name.split('conv_layers.' )[-1]
lowercase__ : int = name.split('.' )
lowercase__ : int = int(items[0] )
lowercase__ : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
lowercase__ : Union[str, Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
lowercase__ : Optional[int] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
lowercase__ : List[Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
lowercase__ : int = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCAmelCase )
@torch.no_grad()
def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : str=None , _lowerCAmelCase : Tuple=True ):
'''simple docstring'''
if config_path is not None:
lowercase__ : Any = UniSpeechSatConfig.from_pretrained(_lowerCAmelCase )
else:
lowercase__ : Any = UniSpeechSatConfig()
lowercase__ : Union[str, Any] = ''
if is_finetuned:
lowercase__ : Optional[Any] = UniSpeechSatForCTC(_lowerCAmelCase )
else:
lowercase__ : List[Any] = UniSpeechSatForPreTraining(_lowerCAmelCase )
lowercase__ , lowercase__ , lowercase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
lowercase__ : Union[str, Any] = model[0].eval()
recursively_load_weights(_lowerCAmelCase , _lowerCAmelCase )
hf_wavavec.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_UpperCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCamelCase : str = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
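# --- Illustrative sketch (not from the original script): how the "*" wildcard in
# MAPPING is resolved to a concrete layer index inside recursively_load_weights.
# The example weight name below is hypothetical.
name = "encoder.layers.3.self_attn.k_proj.weight"
key = "self_attn.k_proj"
mapped_key = "unispeech_sat.encoder.layers.*.attention.k_proj"
layer_index = name.split(key)[0].split(".")[-2]  # -> "3"
assert mapped_key.replace("*", layer_index) == "unispeech_sat.encoder.layers.3.attention.k_proj"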
"""simple docstring"""
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def a_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : str = TaConfig.from_json_file(_lowerCAmelCase )
print(f"""Building PyTorch model from configuration: {config}""" )
lowercase__ : str = TaForConditionalGeneration(_lowerCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_UpperCamelCase : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
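# --- Hypothetical invocation (the script name and paths below are illustrative,
# not taken from the source; the flags match the argparse definitions above):
#
#   python convert_t5_checkpoint.py \
#       --tf_checkpoint_path ./t5/model.ckpt \
#       --config_file ./t5/config.json \
#       --pytorch_dump_path ./t5-pytorch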
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=3_2 , a=2 , a=3 , a=1_6 , a=[1, 2, 1] , a=[2, 2, 4] , a=2 , a=2.0 , a=True , a=0.0 , a=0.0 , a=0.1 , a="gelu" , a=False , a=True , a=0.02 , a=1e-5 , a=True , a=None , a=True , a=1_0 , a=8 , a=["stage1", "stage2", "stage3"] , a=[1, 2, 3] , ) -> int:
lowercase__ : int = parent
lowercase__ : Union[str, Any] = batch_size
lowercase__ : Dict = image_size
lowercase__ : str = patch_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : List[str] = embed_dim
lowercase__ : Any = depths
lowercase__ : Dict = num_heads
lowercase__ : List[str] = window_size
lowercase__ : int = mlp_ratio
lowercase__ : Tuple = qkv_bias
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Tuple = drop_path_rate
lowercase__ : List[str] = hidden_act
lowercase__ : Optional[Any] = use_absolute_embeddings
lowercase__ : Optional[Any] = patch_norm
lowercase__ : Any = layer_norm_eps
lowercase__ : List[Any] = initializer_range
lowercase__ : List[str] = is_training
lowercase__ : int = scope
lowercase__ : Optional[int] = use_labels
lowercase__ : List[Any] = type_sequence_label_size
lowercase__ : List[str] = encoder_stride
lowercase__ : Optional[Any] = out_features
lowercase__ : Dict = out_indices
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = None
if self.use_labels:
lowercase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return MaskFormerSwinConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _UpperCAmelCase ( self , a , a , a ) -> Dict:
lowercase__ : Tuple = MaskFormerSwinModel(config=a )
model.to(a )
model.eval()
lowercase__ : str = model(a )
lowercase__ : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase__ : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]:
lowercase__ : List[Any] = MaskFormerSwinBackbone(config=a )
model.to(a )
model.eval()
lowercase__ : int = model(a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [1_3, 1_6, 1_6, 1_6] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4] )
# verify ValueError
with self.parent.assertRaises(a ):
lowercase__ : Dict = ['stem']
lowercase__ : List[str] = MaskFormerSwinBackbone(config=a )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : int = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs
lowercase__ : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Optional[int] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : List[str] = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
lowerCamelCase__ : str = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : int = False
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : str = MaskFormerSwinModelTester(self )
lowercase__ : Tuple = ConfigTester(self , config_class=a , embed_dim=3_7 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
def _UpperCAmelCase ( self ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCAmelCase ( self ) -> str:
return
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a )
@unittest.skip('Swin does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
@unittest.skip('Swin does not support feedforward chunking' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(a )
lowercase__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def _UpperCAmelCase ( self ) -> int:
pass
def _UpperCAmelCase ( self , a , a , a , a ) -> Tuple:
lowercase__ : Dict = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : str = model(**self._prepare_for_class(a , a ) )
lowercase__ : List[Any] = outputs.hidden_states
lowercase__ : str = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(a ) , a )
# Swin has a different seq_length
lowercase__ : Dict = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , a )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = 3
lowercase__ : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase__ : Tuple = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase__ : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : int = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def _UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def _UpperCAmelCase ( self ) -> Any:
pass
def _UpperCAmelCase ( self ) -> Any:
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(a ):
lowercase__ : Union[str, Any] = 0
return t
def check_equivalence(a , a , a , a={} ):
with torch.no_grad():
lowercase__ : Optional[Any] = model(**a , return_dict=a , **a )
lowercase__ : Optional[int] = model(**a , return_dict=a , **a ).to_tuple()
def recursive_check(a , a ):
if isinstance(a , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(a , a ):
recursive_check(a , a )
elif isinstance(a , a ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(a , a )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(a ) , set_nan_tensor_to_zero(a ) , atol=1e-5 ) , msg=(
'Tuple and dict output are not equal. Difference:'
f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
f""" {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}. Dict has"""
f""" `nan`: {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}."""
) , )
recursive_check(a , a )
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(a )
model.to(a )
model.eval()
lowercase__ : Tuple = self._prepare_for_class(a , a )
lowercase__ : Optional[Any] = self._prepare_for_class(a , a )
check_equivalence(a , a , a )
lowercase__ : Any = self._prepare_for_class(a , a , return_labels=a )
lowercase__ : List[Any] = self._prepare_for_class(a , a , return_labels=a )
check_equivalence(a , a , a )
lowercase__ : Any = self._prepare_for_class(a , a )
lowercase__ : int = self._prepare_for_class(a , a )
check_equivalence(a , a , a , {'output_hidden_states': True} )
lowercase__ : Dict = self._prepare_for_class(a , a , return_labels=a )
lowercase__ : Optional[int] = self._prepare_for_class(a , a , return_labels=a )
check_equivalence(a , a , a , {'output_hidden_states': True} )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase , _a):
lowerCamelCase__ : Dict = (MaskFormerSwinBackbone,) if is_torch_available() else ()
lowerCamelCase__ : Optional[int] = MaskFormerSwinConfig
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Optional[int] = MaskFormerSwinModelTester(self )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : int = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
lowercase__ : Optional[Any] = backbone_class(a )
backbone.to(a )
backbone.eval()
lowercase__ : Union[str, Any] = backbone(**a )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , a )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
            self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
lowercase__ : List[str] = backbone(**a , output_hidden_states=a )
self.assertIsNotNone(outputs.hidden_states )
        self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
lowercase__ , lowercase__ , lowercase__ : int = hidden_state.shape
                    self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
lowercase__ : List[Any] = backbone(**a , output_attentions=a )
self.assertIsNotNone(outputs.attentions )
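# --- Illustrative check (not from the original test): the sequence-length and
# hidden-size arithmetic used in create_and_check_model, with this tester's
# defaults (image 32, patch 2, embed_dim 16, depths [1, 2, 1]).
image_size, patch_size, embed_dim, depths = 32, 2, 16, [1, 2, 1]
num_patches = (image_size // patch_size) ** 2                # 256 patches
expected_seq_len = num_patches // (4 ** (len(depths) - 1))   # two 2x2 patch merges -> 16
expected_dim = embed_dim * 2 ** (len(depths) - 1)            # 16 * 4 -> 64
assert (expected_seq_len, expected_dim) == (16, 64)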
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
_UpperCamelCase : Optional[int] = logging.get_logger(__name__)
_UpperCamelCase : List[str] = {
"openai/imagegpt-small": "",
"openai/imagegpt-medium": "",
"openai/imagegpt-large": "",
}
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : List[Any] = "imagegpt"
lowerCamelCase__ : Optional[int] = ["past_key_values"]
lowerCamelCase__ : Dict = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , a=5_1_2 + 1 , a=3_2 * 3_2 , a=5_1_2 , a=2_4 , a=8 , a=None , a="quick_gelu" , a=0.1 , a=0.1 , a=0.1 , a=1e-5 , a=0.02 , a=True , a=True , a=False , a=False , a=False , **a , ) -> Any:
lowercase__ : Optional[int] = vocab_size
lowercase__ : Optional[int] = n_positions
lowercase__ : List[Any] = n_embd
lowercase__ : Dict = n_layer
lowercase__ : Optional[int] = n_head
lowercase__ : Union[str, Any] = n_inner
lowercase__ : List[str] = activation_function
lowercase__ : Tuple = resid_pdrop
lowercase__ : int = embd_pdrop
lowercase__ : Optional[int] = attn_pdrop
lowercase__ : Any = layer_norm_epsilon
lowercase__ : Any = initializer_range
lowercase__ : Optional[int] = scale_attn_weights
lowercase__ : int = use_cache
lowercase__ : List[str] = scale_attn_by_inverse_layer_idx
lowercase__ : List[str] = reorder_and_upcast_attn
lowercase__ : int = tie_word_embeddings
super().__init__(tie_word_embeddings=a , **a )
class UpperCAmelCase_ ( _a):
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
] )
def _UpperCAmelCase ( self , a , a = 1 , a = -1 , a = False , a = None , a = 3 , a = 3_2 , a = 3_2 , ) -> Mapping[str, Any]:
lowercase__ : Union[str, Any] = self._generate_dummy_images(a , a , a , a )
lowercase__ : int = dict(preprocessor(images=a , return_tensors=a ) )
return inputs
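# --- Illustrative note (not from the original config): where the defaults
# vocab_size=512 + 1 and n_positions=32 * 32 come from. ImageGPT quantizes pixels
# into a 512-entry color palette plus one start-of-sequence token, and models each
# 32x32 image as a sequence of 1024 pixel tokens.
palette_size, sos_tokens, side = 512, 1, 32
assert palette_size + sos_tokens == 513  # default vocab_size
assert side * side == 1024               # default n_positions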
"""simple docstring"""
import math
def a_ ( _lowerCAmelCase : int = 100 ):
'''simple docstring'''
lowercase__ : Union[str, Any] = sum(i * i for i in range(1 , n + 1 ) )
lowercase__ : str = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
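# --- Cross-check sketch (not part of the original solution): the same difference
# via the closed forms sum(i) = n(n + 1)/2 and sum(i^2) = n(n + 1)(2n + 1)/6.
def closed_form_solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    square_of_sum = (n * (n + 1) // 2) ** 2
    return square_of_sum - sum_of_squares

assert closed_form_solution(10) == 2640  # agrees with the brute-force version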
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : jnp.ndarray
lowerCamelCase__ : jnp.ndarray
class UpperCAmelCase_ ( nn.Module):
lowerCamelCase__ : int
lowerCamelCase__ : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
lowerCamelCase__ : jnp.dtype = jnp.floataa
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Dict = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowercase__ : str = []
for i in range(len(self.block_out_channels ) - 1 ):
lowercase__ : str = self.block_out_channels[i]
lowercase__ : Optional[Any] = self.block_out_channels[i + 1]
lowercase__ : str = nn.Conv(
a , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(a )
lowercase__ : Optional[int] = nn.Conv(
a , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(a )
lowercase__ : Optional[Any] = blocks
lowercase__ : Any = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , a ) -> str:
lowercase__ : Optional[Any] = self.conv_in(a )
lowercase__ : Tuple = nn.silu(a )
for block in self.blocks:
lowercase__ : Dict = block(a )
lowercase__ : int = nn.silu(a )
lowercase__ : Tuple = self.conv_out(a )
return embedding
@flax_register_to_config
class UpperCAmelCase_ ( nn.Module , _a , _a):
lowerCamelCase__ : int = 3_2
lowerCamelCase__ : int = 4
lowerCamelCase__ : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
lowerCamelCase__ : Union[bool, Tuple[bool]] = False
lowerCamelCase__ : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
lowerCamelCase__ : int = 2
lowerCamelCase__ : Union[int, Tuple[int]] = 8
lowerCamelCase__ : Optional[Union[int, Tuple[int]]] = None
lowerCamelCase__ : int = 1_2_8_0
lowerCamelCase__ : float = 0.0
lowerCamelCase__ : bool = False
lowerCamelCase__ : jnp.dtype = jnp.floataa
lowerCamelCase__ : bool = True
lowerCamelCase__ : int = 0
lowerCamelCase__ : str = "rgb"
lowerCamelCase__ : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
def _UpperCAmelCase ( self , a ) -> FrozenDict:
# init input tensors
lowercase__ : Optional[int] = (1, self.in_channels, self.sample_size, self.sample_size)
lowercase__ : Union[str, Any] = jnp.zeros(a , dtype=jnp.floataa )
lowercase__ : Dict = jnp.ones((1,) , dtype=jnp.intaa )
lowercase__ : Dict = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
lowercase__ : str = (1, 3, self.sample_size * 8, self.sample_size * 8)
lowercase__ : str = jnp.zeros(a , dtype=jnp.floataa )
lowercase__ , lowercase__ : Union[str, Any] = jax.random.split(a )
lowercase__ : List[str] = {'params': params_rng, 'dropout': dropout_rng}
return self.init(a , a , a , a , a )["params"]
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Tuple = self.block_out_channels
lowercase__ : Any = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowercase__ : List[str] = self.num_attention_heads or self.attention_head_dim
# input
lowercase__ : int = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowercase__ : str = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
lowercase__ : Any = FlaxTimestepEmbedding(a , dtype=self.dtype )
lowercase__ : Union[str, Any] = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
lowercase__ : Dict = self.only_cross_attention
if isinstance(a , a ):
lowercase__ : int = (only_cross_attention,) * len(self.down_block_types )
if isinstance(a , a ):
lowercase__ : Tuple = (num_attention_heads,) * len(self.down_block_types )
# down
lowercase__ : int = []
lowercase__ : List[Any] = []
lowercase__ : Union[str, Any] = block_out_channels[0]
lowercase__ : Dict = nn.Conv(
a , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(a )
for i, down_block_type in enumerate(self.down_block_types ):
lowercase__ : List[Any] = output_channel
lowercase__ : Tuple = block_out_channels[i]
lowercase__ : List[str] = i == len(a ) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowercase__ : Optional[Any] = FlaxCrossAttnDownBlockaD(
in_channels=a , out_channels=a , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
lowercase__ : Tuple = FlaxDownBlockaD(
in_channels=a , out_channels=a , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(a )
for _ in range(self.layers_per_block ):
lowercase__ : Tuple = nn.Conv(
a , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(a )
if not is_final_block:
lowercase__ : Dict = nn.Conv(
a , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(a )
lowercase__ : int = down_blocks
lowercase__ : Any = controlnet_down_blocks
# mid
lowercase__ : str = block_out_channels[-1]
lowercase__ : Tuple = FlaxUNetMidBlockaDCrossAttn(
in_channels=a , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
lowercase__ : int = nn.Conv(
a , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , a , a , a , a , a = 1.0 , a = True , a = False , ) -> Union[FlaxControlNetOutput, Tuple]:
lowercase__ : Optional[int] = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
lowercase__ : List[Any] = jnp.flip(a , axis=1 )
# 1. time
if not isinstance(a , jnp.ndarray ):
lowercase__ : Union[str, Any] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(a , jnp.ndarray ) and len(timesteps.shape ) == 0:
lowercase__ : Optional[Any] = timesteps.astype(dtype=jnp.floataa )
lowercase__ : str = jnp.expand_dims(a , 0 )
lowercase__ : Tuple = self.time_proj(a )
lowercase__ : List[str] = self.time_embedding(a )
# 2. pre-process
lowercase__ : Optional[Any] = jnp.transpose(a , (0, 2, 3, 1) )
lowercase__ : List[Any] = self.conv_in(a )
lowercase__ : Optional[int] = jnp.transpose(a , (0, 2, 3, 1) )
lowercase__ : Optional[Any] = self.controlnet_cond_embedding(a )
sample += controlnet_cond
# 3. down
lowercase__ : Union[str, Any] = (sample,)
for down_block in self.down_blocks:
if isinstance(a , a ):
lowercase__ , lowercase__ : Any = down_block(a , a , a , deterministic=not train )
else:
lowercase__ , lowercase__ : Tuple = down_block(a , a , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
lowercase__ : Dict = self.mid_block(a , a , a , deterministic=not train )
        # 5. controlnet blocks
lowercase__ : Dict = ()
for down_block_res_sample, controlnet_block in zip(a , self.controlnet_down_blocks ):
lowercase__ : List[str] = controlnet_block(a )
controlnet_down_block_res_samples += (down_block_res_sample,)
lowercase__ : Dict = controlnet_down_block_res_samples
lowercase__ : Union[str, Any] = self.controlnet_mid_block(a )
# 6. scaling
lowercase__ : int = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=a , mid_block_res_sample=a )
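# --- Illustrative sketch (not from the original model): the "zero convolution"
# idea behind the zeros-initialized 1x1 convs in controlnet_down_blocks above.
# A conv whose kernel and bias start at zero outputs all zeros, so at the start
# of training the ControlNet branch leaves the frozen UNet's activations unchanged.
import jax
import jax.numpy as jnp
import flax.linen as nn

zero_conv = nn.Conv(features=4, kernel_size=(1, 1), padding="VALID",
                    kernel_init=nn.initializers.zeros, bias_init=nn.initializers.zeros)
x = jnp.ones((1, 8, 8, 4))  # NHWC layout, as used by Flax convs
params = zero_conv.init(jax.random.PRNGKey(0), x)
assert jnp.all(zero_conv.apply(params, x) == 0.0)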
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ , lowercase__ : str = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-canny' , from_pt=a , dtype=jnp.bfloataa )
lowercase__ , lowercase__ : List[str] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
lowercase__ : List[Any] = controlnet_params
lowercase__ : int = 'bird'
lowercase__ : List[Any] = jax.device_count()
lowercase__ : Dict = pipe.prepare_text_inputs([prompts] * num_samples )
lowercase__ : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
lowercase__ : Optional[int] = pipe.prepare_image_inputs([canny_image] * num_samples )
lowercase__ : List[Any] = jax.random.PRNGKey(0 )
lowercase__ : Tuple = jax.random.split(a , jax.device_count() )
lowercase__ : str = replicate(a )
lowercase__ : List[str] = shard(a )
lowercase__ : Dict = shard(a )
lowercase__ : List[Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=5_0 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
lowercase__ : Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase__ : Tuple = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowercase__ : int = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ : Optional[Any] = jnp.array(
[0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ , lowercase__ : int = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-openpose' , from_pt=a , dtype=jnp.bfloataa )
lowercase__ , lowercase__ : Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
lowercase__ : Optional[Any] = controlnet_params
lowercase__ : List[Any] = 'Chef in the kitchen'
lowercase__ : List[str] = jax.device_count()
lowercase__ : Dict = pipe.prepare_text_inputs([prompts] * num_samples )
lowercase__ : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
lowercase__ : Optional[int] = pipe.prepare_image_inputs([pose_image] * num_samples )
lowercase__ : List[str] = jax.random.PRNGKey(0 )
lowercase__ : str = jax.random.split(a , jax.device_count() )
lowercase__ : Optional[Any] = replicate(a )
lowercase__ : Optional[Any] = shard(a )
lowercase__ : List[Any] = shard(a )
lowercase__ : List[Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=5_0 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
lowercase__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase__ : List[str] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowercase__ : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ : str = jnp.array(
[[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
"""simple docstring"""
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : Any = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : int = 0
while number > 0:
lowercase__ : List[str] = number % 10
sum_of_digits += last_digit
lowercase__ : Any = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def a_ ( _lowerCAmelCase : int = 100 ):
'''simple docstring'''
lowercase__ : List[str] = factorial(_lowerCAmelCase )
lowercase__ : Tuple = split_and_add(_lowerCAmelCase )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
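# --- Cross-check sketch (not part of the original solution): the same result via
# math.factorial and a string digit sum.
import math

def solution_via_str(num: int = 100) -> int:
    return sum(int(digit) for digit in str(math.factorial(num)))

assert solution_via_str(10) == 27  # 10! = 3628800 -> 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27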
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Optional[int] = 1
lowercase__ : List[str] = 3
lowercase__ : List[Any] = (3_2, 3_2)
lowercase__ : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(a )
return image
@property
def _UpperCAmelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
lowercase__ : List[Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
return model
@property
def _UpperCAmelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
lowercase__ : Dict = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def _UpperCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
lowercase__ : Dict = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_6 , )
return RobertaSeriesModelWithTransformation(a )
@property
def _UpperCAmelCase ( self ) -> Any:
def extract(*a , **a ):
class UpperCAmelCase_ :
def __init__( self ) -> Optional[Any]:
lowercase__ : Optional[Any] = torch.ones([0] )
def _UpperCAmelCase ( self , a ) -> str:
self.pixel_values.to(a )
return self
return Out()
return extract
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowercase__ : str = self.dummy_cond_unet
lowercase__ : Any = PNDMScheduler(skip_prk_steps=a )
lowercase__ : str = self.dummy_vae
lowercase__ : Union[str, Any] = self.dummy_text_encoder
lowercase__ : Optional[int] = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
lowercase__ : Any = 7_7
lowercase__ : List[Any] = self.dummy_image.to(a )
lowercase__ : Dict = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowercase__ : Optional[int] = AltDiffusionImgaImgPipeline(
unet=a , scheduler=a , vae=a , text_encoder=a , tokenizer=a , safety_checker=a , feature_extractor=self.dummy_extractor , )
lowercase__ : Union[str, Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=a )
lowercase__ : Tuple = alt_pipe.to(a )
alt_pipe.set_progress_bar_config(disable=a )
lowercase__ : Tuple = 'A painting of a squirrel eating a burger'
lowercase__ : str = torch.Generator(device=a ).manual_seed(0 )
lowercase__ : List[Any] = alt_pipe(
[prompt] , generator=a , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=a , )
lowercase__ : int = output.images
lowercase__ : List[Any] = torch.Generator(device=a ).manual_seed(0 )
lowercase__ : Dict = alt_pipe(
[prompt] , generator=a , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=a , return_dict=a , )[0]
lowercase__ : Dict = image[0, -3:, -3:, -1]
lowercase__ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowercase__ : List[str] = np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[int] = self.dummy_cond_unet
lowercase__ : Optional[Any] = PNDMScheduler(skip_prk_steps=a )
lowercase__ : Dict = self.dummy_vae
lowercase__ : List[Any] = self.dummy_text_encoder
lowercase__ : Optional[int] = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
lowercase__ : int = 7_7
lowercase__ : str = self.dummy_image.to(a )
# put models in fp16
lowercase__ : int = unet.half()
lowercase__ : List[str] = vae.half()
lowercase__ : Dict = bert.half()
# make sure here that pndm scheduler skips prk
lowercase__ : List[Any] = AltDiffusionImgaImgPipeline(
unet=a , scheduler=a , vae=a , text_encoder=a , tokenizer=a , safety_checker=a , feature_extractor=self.dummy_extractor , )
lowercase__ : int = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=a )
lowercase__ : Union[str, Any] = alt_pipe.to(a )
alt_pipe.set_progress_bar_config(disable=a )
lowercase__ : Optional[Any] = 'A painting of a squirrel eating a burger'
lowercase__ : Optional[int] = torch.manual_seed(0 )
lowercase__ : List[str] = alt_pipe(
[prompt] , generator=a , num_inference_steps=2 , output_type='np' , image=a , ).images
assert image.shape == (1, 3_2, 3_2, 3)
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
# resize to resolution that is divisible by 8 but not 16 or 32
lowercase__ : Optional[Any] = init_image.resize((7_6_0, 5_0_4) )
lowercase__ : Tuple = 'BAAI/AltDiffusion'
lowercase__ : Tuple = AltDiffusionImgaImgPipeline.from_pretrained(
a , safety_checker=a , )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
pipe.enable_attention_slicing()
lowercase__ : str = 'A fantasy landscape, trending on artstation'
lowercase__ : Dict = torch.manual_seed(0 )
lowercase__ : List[str] = pipe(
prompt=a , image=a , strength=0.75 , guidance_scale=7.5 , generator=a , output_type='np' , )
lowercase__ : Optional[Any] = output.images[0]
lowercase__ : Optional[Any] = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 7_6_0, 3)
lowercase__ : int = np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
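# --- Illustrative check (not from the original test): the 760x504 resolution used
# above is divisible by 8 but not by 16 or 32, which exercises the VAE padding path.
for side in (760, 504):
    assert side % 8 == 0 and side % 16 != 0 and side % 32 != 0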
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
lowercase__ : Tuple = init_image.resize((7_6_8, 5_1_2) )
lowercase__ : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy' )
lowercase__ : Union[str, Any] = 'BAAI/AltDiffusion'
lowercase__ : Union[str, Any] = AltDiffusionImgaImgPipeline.from_pretrained(
a , safety_checker=a , )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
pipe.enable_attention_slicing()
lowercase__ : Optional[Any] = 'A fantasy landscape, trending on artstation'
lowercase__ : Union[str, Any] = torch.manual_seed(0 )
lowercase__ : Union[str, Any] = pipe(
prompt=a , image=a , strength=0.75 , guidance_scale=7.5 , generator=a , output_type='np' , )
lowercase__ : Union[str, Any] = output.images[0]
assert image.shape == (5_1_2, 7_6_8, 3)
        # img2img is flaky across GPUs even in fp32, so we only check the max absolute error here
assert np.abs(expected_image - image ).max() < 1e-2
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
lowercase__ : Union[str, Any] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
model.to(a )
from datasets import load_dataset
lowercase__ : str = load_dataset('nielsr/rvlcdip-demo' )
lowercase__ : Tuple = dataset['train'][0]['image'].convert('RGB' )
lowercase__ : int = image_processor(a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : List[str] = model(**a )
lowercase__ : List[Any] = outputs.logits
lowercase__ : Union[str, Any] = torch.Size((1, 1_6) )
self.assertEqual(logits.shape , a )
lowercase__ : Tuple = torch.tensor(
[-0.4_158, -0.4_092, -0.4_347] , device=a , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , a , atol=1e-4 ) )
| 645
| 1
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
_UpperCamelCase : int = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig:
    def __init__(
        self ,
        load_in_8bit=False ,
        load_in_4bit=False ,
        llm_int8_threshold=6.0 ,
        llm_int8_skip_modules=None ,
        llm_int8_enable_fp32_cpu_offload=False ,
        llm_int8_has_fp16_weight=False ,
        bnb_4bit_compute_dtype=None ,
        bnb_4bit_quant_type='fp4' ,
        bnb_4bit_use_double_quant=False ,
        **kwargs ,
    ) -> None:
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant
        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype , str ):
            self.bnb_4bit_compute_dtype = getattr(torch , bnb_4bit_compute_dtype )
        elif isinstance(bnb_4bit_compute_dtype , torch.dtype ):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype' )
        self.post_init()
    def post_init(self ):
        if not isinstance(self.llm_int8_threshold , float ):
            raise ValueError('llm_int8_threshold must be a float' )
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules , list ):
            raise ValueError('llm_int8_skip_modules must be a list of strings' )
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload , bool ):
            raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean' )
        if not isinstance(self.llm_int8_has_fp16_weight , bool ):
            raise ValueError('llm_int8_has_fp16_weight must be a boolean' )
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype , torch.dtype ):
            raise ValueError('bnb_4bit_compute_dtype must be torch.dtype' )
        if not isinstance(self.bnb_4bit_quant_type , str ):
            raise ValueError('bnb_4bit_quant_type must be a string' )
        if not isinstance(self.bnb_4bit_use_double_quant , bool ):
            raise ValueError('bnb_4bit_use_double_quant must be a boolean' )
        if self.load_in_4bit and not version.parse(importlib.metadata.version('bitsandbytes' ) ) >= version.parse(
            '0.39.0' ):
            raise ValueError(
                '4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version' )
    def is_quantizable(self ):
        return self.load_in_8bit or self.load_in_4bit
    def quantization_method(self ):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None
    @classmethod
    def from_dict(cls , config_dict , return_unused_kwargs , **kwargs ):
        config = cls(**config_dict )
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config , key ):
                setattr(config , key , value )
                to_remove.append(key )
        for key in to_remove:
            kwargs.pop(key , None )
        if return_unused_kwargs:
            return config, kwargs
        else:
            return config
    def to_json_file(self , json_file_path ):
        with open(json_file_path , 'w' , encoding='utf-8' ) as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict , indent=2 , sort_keys=True ) + '\n'
            writer.write(json_string )
    def to_dict(self ) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__ )
        output['bnb_4bit_compute_dtype'] = str(output['bnb_4bit_compute_dtype'] ).split('.' )[1]
        return output
    def __repr__(self ):
        return f"""{self.__class__.__name__} {self.to_json_string()}"""
    def to_json_string(self , use_diff = True ) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict , indent=2 , sort_keys=True ) + "\n"
    def to_diff_dict(self ) -> Dict[str, Any]:
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()
        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
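# A minimal usage sketch for the config above (kwargs mirror its __init__;
# nothing here is executed by the surrounding code):
#
#   config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type='nf4')
#   config.to_diff_dict()    # only the two non-default fields survive
#   config.to_json_string()  # pass use_diff=False for the full serialization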
| 645
|
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase_ :
@staticmethod
def _UpperCAmelCase ( *a , **a ) -> int:
pass
def hashimage(image: Image ):
    '''simple docstring'''
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ : Union[str, Any] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def _UpperCAmelCase ( self , a , a , a ) -> Dict:
lowercase__ : Union[str, Any] = DepthEstimationPipeline(model=a , image_processor=a )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _UpperCAmelCase ( self , a , a ) -> Optional[int]:
lowercase__ : Tuple = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , a )
import datasets
lowercase__ : Tuple = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
lowercase__ : List[Any] = depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
] , a , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@slow
@require_torch
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Tuple = 'Intel/dpt-large'
lowercase__ : Optional[int] = pipeline('depth-estimation' , model=a )
lowercase__ : List[Any] = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
lowercase__ : Optional[Any] = hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )
@require_torch
def _UpperCAmelCase ( self ) -> Optional[int]:
# This is highly irregular to have no small tests.
self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
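# Shape of a depth-estimation result, as pinned down by the assertions above
# (a summary, not new behaviour): each call returns a dict with a float
# `predicted_depth` torch.Tensor and a PIL.Image `depth` visualisation.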
| 645
| 1
|
"""simple docstring"""
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    def __init__(self ) -> None:
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )
    def forward(self , x ):
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Tuple = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(a , model.state_dict() )
lowercase__ : Optional[int] = os.path.join(a , 'index.json' )
self.assertTrue(os.path.isfile(a ) )
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
lowercase__ : Any = os.path.join(a , f"""{key}.dat""" )
self.assertTrue(os.path.isfile(a ) )
# TODO: add tests on the fact weights are properly loaded
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : List[str] = [torch.floataa, torch.floataa, torch.bfloataa]
for dtype in dtypes:
lowercase__ : Optional[Any] = torch.randn(2 , 3 , dtype=a )
with TemporaryDirectory() as tmp_dir:
lowercase__ : Any = offload_weight(a , 'weight' , a , {} )
lowercase__ : Any = os.path.join(a , 'weight.dat' )
self.assertTrue(os.path.isfile(a ) )
self.assertDictEqual(a , {'weight': {'shape': [2, 3], 'dtype': str(a ).split('.' )[1]}} )
lowercase__ : Optional[int] = load_offloaded_weight(a , index['weight'] )
self.assertTrue(torch.equal(a , a ) )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Union[str, Any] = ModelForTest()
lowercase__ : List[Any] = model.state_dict()
lowercase__ : List[str] = {k: v for k, v in state_dict.items() if 'linear2' not in k}
lowercase__ : str = {k: v for k, v in state_dict.items() if 'linear2' in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(a , a )
lowercase__ : Tuple = OffloadedWeightsLoader(state_dict=a , save_folder=a )
# Every key is there with the right value
self.assertEqual(sorted(a ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(a , weight_map[key] ) )
lowercase__ : Optional[Any] = {k: v for k, v in state_dict.items() if 'weight' in k}
lowercase__ : str = {k: v for k, v in state_dict.items() if 'weight' not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(a , a )
lowercase__ : List[Any] = OffloadedWeightsLoader(state_dict=a , save_folder=a )
# Every key is there with the right value
self.assertEqual(sorted(a ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(a , weight_map[key] ) )
with TemporaryDirectory() as tmp_dir:
offload_state_dict(a , a )
# Duplicates are removed
lowercase__ : Union[str, Any] = OffloadedWeightsLoader(state_dict=a , save_folder=a )
# Every key is there with the right value
self.assertEqual(sorted(a ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(a , weight_map[key] ) )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Optional[int] = {'a.1': 0, 'a.10': 1, 'a.2': 2}
lowercase__ : List[str] = extract_submodules_state_dict(a , ['a.1', 'a.2'] )
self.assertDictEqual(a , {'a.1': 0, 'a.2': 2} )
lowercase__ : int = {'a.1.a': 0, 'a.10.a': 1, 'a.2.a': 2}
lowercase__ : Tuple = extract_submodules_state_dict(a , ['a.1', 'a.2'] )
self.assertDictEqual(a , {'a.1.a': 0, 'a.2.a': 2} )
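# On-disk layout exercised by these tests (inferred from the assertions above,
# not from separate documentation): `offload_state_dict` writes one `<key>.dat`
# file per tensor plus an `index.json` whose entries look like
#   {"weight": {"shape": [2, 3], "dtype": "float32"}}
# and `load_offloaded_weight(path, index[key])` reconstructs the tensor.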
| 645
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class UpperCAmelCase_ ( _a):
def __init__( self ) -> Any:
lowercase__ : Tuple = []
def _UpperCAmelCase ( self , a , a , a , **a ) -> Any:
self.events.append('on_init_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Optional[int]:
self.events.append('on_train_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> List[str]:
self.events.append('on_train_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_epoch_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Optional[Any]:
self.events.append('on_epoch_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_step_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> str:
self.events.append('on_step_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_evaluate' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Tuple:
self.events.append('on_predict' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Union[str, Any]:
self.events.append('on_save' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> List[str]:
self.events.append('on_log' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Any:
self.events.append('on_prediction_step' )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> str:
lowercase__ : str = tempfile.mkdtemp()
def _UpperCAmelCase ( self ) -> Dict:
shutil.rmtree(self.output_dir )
def _UpperCAmelCase ( self , a=0 , a=0 , a=6_4 , a=6_4 , a=None , a=False , **a ) -> int:
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# its set to False since the tests later on depend on its value.
lowercase__ : str = RegressionDataset(length=a )
lowercase__ : Any = RegressionDataset(length=a )
lowercase__ : Optional[Any] = RegressionModelConfig(a=a , b=a )
lowercase__ : Union[str, Any] = RegressionPreTrainedModel(a )
lowercase__ : Tuple = TrainingArguments(self.output_dir , disable_tqdm=a , report_to=[] , **a )
return Trainer(
a , a , train_dataset=a , eval_dataset=a , callbacks=a , )
def _UpperCAmelCase ( self , a , a ) -> Union[str, Any]:
self.assertEqual(len(a ) , len(a ) )
# Order doesn't matter
lowercase__ : Optional[int] = sorted(a , key=lambda a : cb.__name__ if isinstance(a , a ) else cb.__class__.__name__ )
lowercase__ : Tuple = sorted(a , key=lambda a : cb.__name__ if isinstance(a , a ) else cb.__class__.__name__ )
for cba, cba in zip(a , a ):
if isinstance(a , a ) and isinstance(a , a ):
self.assertEqual(a , a )
elif isinstance(a , a ) and not isinstance(a , a ):
self.assertEqual(a , cba.__class__ )
elif not isinstance(a , a ) and isinstance(a , a ):
self.assertEqual(cba.__class__ , a )
else:
self.assertEqual(a , a )
def _UpperCAmelCase ( self , a ) -> Optional[Any]:
lowercase__ : Dict = ['on_init_end', 'on_train_begin']
lowercase__ : List[Any] = 0
lowercase__ : Optional[int] = len(trainer.get_eval_dataloader() )
lowercase__ : Tuple = ['on_prediction_step'] * len(trainer.get_eval_dataloader() ) + ['on_log', 'on_evaluate']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('on_epoch_begin' )
for _ in range(a ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('on_log' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('on_save' )
expected_events.append('on_epoch_end' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
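        # Shape of the sequence built above (a sketch): every epoch is
        # bracketed by on_epoch_begin/on_epoch_end around per-step
        # on_step_begin/on_step_end pairs, with on_log, on_save and the
        # evaluation block spliced in at the configured intervals, and the
        # whole run closed by on_log, on_train_end.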
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : int = self.get_trainer()
lowercase__ : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# Callbacks passed at init are added to the default callbacks
lowercase__ : str = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
lowercase__ : List[Any] = self.get_trainer(disable_tqdm=a )
lowercase__ : Optional[Any] = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : int = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
lowercase__ : List[str] = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(a )
expected_callbacks.remove(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
lowercase__ : Optional[Any] = self.get_trainer()
lowercase__ : List[Any] = trainer.pop_callback(a )
self.assertEqual(cb.__class__ , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
trainer.add_callback(a )
expected_callbacks.insert(0 , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# We can also add, pop, or remove by instance
lowercase__ : int = self.get_trainer()
lowercase__ : List[str] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(a )
expected_callbacks.remove(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
lowercase__ : Tuple = self.get_trainer()
lowercase__ : Dict = trainer.callback_handler.callbacks[0]
lowercase__ : Union[str, Any] = trainer.pop_callback(a )
self.assertEqual(a , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
trainer.add_callback(a )
expected_callbacks.insert(0 , a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
def _UpperCAmelCase ( self ) -> Tuple:
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action='ignore' , category=a )
lowercase__ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
lowercase__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# Independent log/save/eval
lowercase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
lowercase__ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
lowercase__ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='steps' )
trainer.train()
lowercase__ : Optional[int] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
lowercase__ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='epoch' )
trainer.train()
lowercase__ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# A bit of everything
lowercase__ : Any = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=1_0 , eval_steps=5 , evaluation_strategy='steps' , )
trainer.train()
lowercase__ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(a , self.get_expected_events(a ) )
# warning should be emitted for duplicated callbacks
with patch('transformers.trainer_callback.logger.warning' ) as warn_mock:
lowercase__ : str = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(a ) in warn_mock.call_args[0][0]
| 645
| 1
|
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=1_0 , a=3 , a=2 , a=2 , a=2 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=0.9 , a=None , ) -> Optional[Any]:
lowercase__ : str = parent
lowercase__ : int = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : Dict = patch_size
lowercase__ : Tuple = tubelet_size
lowercase__ : Optional[int] = num_frames
lowercase__ : Optional[int] = is_training
lowercase__ : int = use_labels
lowercase__ : Optional[int] = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : Optional[int] = num_attention_heads
lowercase__ : Any = intermediate_size
lowercase__ : str = hidden_act
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Union[str, Any] = type_sequence_label_size
lowercase__ : List[Any] = initializer_range
lowercase__ : str = mask_ratio
lowercase__ : Optional[Any] = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
lowercase__ : Optional[Any] = (image_size // patch_size) ** 2
lowercase__ : str = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
lowercase__ : str = int(mask_ratio * self.seq_length )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : int = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowercase__ : int = None
if self.use_labels:
lowercase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Dict = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Tuple:
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]:
lowercase__ : Dict = VideoMAEModel(config=a )
model.to(a )
model.eval()
lowercase__ : Tuple = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a ) -> Union[str, Any]:
lowercase__ : str = VideoMAEForPreTraining(a )
model.to(a )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase__ : Any = torch.ones((self.num_masks,) )
lowercase__ : str = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
lowercase__ : Optional[int] = mask.expand(self.batch_size , -1 ).bool()
lowercase__ : str = model(a , a )
# model only returns predictions for masked patches
lowercase__ : str = mask.sum().item()
lowercase__ : int = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Dict = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs
lowercase__ : List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
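# Worked example of the mask bookkeeping in the tester above, using its
# defaults (image_size=10, patch_size=2, num_frames=2, tubelet_size=2,
# mask_ratio=0.9):
#   num_patches_per_frame = (10 // 2) ** 2 = 25
#   seq_length            = (2 // 2) * 25  = 25
#   num_masks             = int(0.9 * 25)  = 22
# so every video in a batch masks the same 22 of its 25 tokens.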
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Tuple = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
lowerCamelCase__ : Optional[int] = (
{"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ : Any = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Union[str, Any] = False
lowerCamelCase__ : str = False
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Optional[Any] = VideoMAEModelTester(self )
lowercase__ : Optional[Any] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 )
def _UpperCAmelCase ( self , a , a , a=False ) -> Optional[int]:
lowercase__ : Union[str, Any] = copy.deepcopy(a )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase__ : Optional[Any] = torch.ones((self.model_tester.num_masks,) )
lowercase__ : Any = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
lowercase__ : Any = mask.expand(self.model_tester.batch_size , -1 ).bool()
lowercase__ : Union[str, Any] = bool_masked_pos.to(a )
if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING ),
            ]:
lowercase__ : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a )
return inputs_dict
def _UpperCAmelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='VideoMAE does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Dict:
pass
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : int = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
lowercase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : int = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[Any] = VideoMAEModel.from_pretrained(a )
self.assertIsNotNone(a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
if not self.has_attentions:
pass
else:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : str = True
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = self.model_tester.seq_length - self.model_tester.num_masks
lowercase__ : Any = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
lowercase__ : Optional[Any] = True
lowercase__ : int = False
lowercase__ : Any = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Optional[int] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Dict = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : str = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : List[Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[Any] = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowercase__ : List[str] = len(a )
# Check attention is always last and order is fine
lowercase__ : Optional[int] = True
lowercase__ : List[str] = True
lowercase__ : int = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : List[str] = model(**self._prepare_for_class(a , a ) )
self.assertEqual(out_len + 1 , len(a ) )
lowercase__ : int = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _UpperCAmelCase ( self ) -> Optional[int]:
def check_hidden_states_output(a , a , a ):
lowercase__ : Optional[int] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[int] = outputs.hidden_states
lowercase__ : List[Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(a ) , a )
lowercase__ : Optional[Any] = self.model_tester.seq_length - self.model_tester.num_masks
lowercase__ : Union[str, Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Tuple = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Union[str, Any] = True
check_hidden_states_output(a , a , a )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
def prepare_video():
    '''simple docstring'''
    file_path = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
    video = np.load(file_path )
    return list(video )
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def _UpperCAmelCase ( self ) -> Optional[Any]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to(
a )
lowercase__ : str = self.default_image_processor
lowercase__ : List[str] = prepare_video()
lowercase__ : int = image_processor(a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**a )
# verify the logits
lowercase__ : str = torch.Size((1, 4_0_0) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ : List[Any] = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) )
@slow
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[int] = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(a )
lowercase__ : Optional[Any] = self.default_image_processor
lowercase__ : List[str] = prepare_video()
lowercase__ : str = image_processor(a , return_tensors='pt' ).to(a )
# add boolean mask, indicating which patches to mask
lowercase__ : Union[str, Any] = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
lowercase__ : str = torch.load(a )
# forward pass
with torch.no_grad():
lowercase__ : List[Any] = model(**a )
# verify the logits
lowercase__ : Dict = torch.Size([1, 1_4_0_8, 1_5_3_6] )
lowercase__ : List[str] = torch.tensor(
[[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=a )
self.assertEqual(outputs.logits.shape , a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
lowercase__ : List[Any] = torch.tensor([0.5_142] , device=a )
self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
lowercase__ : Tuple = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=a ).to(
a )
with torch.no_grad():
lowercase__ : Any = model(**a )
        lowercase__ : List[Any] = torch.tensor([0.6_469] , device=a )
self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) )
| 645
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_UpperCamelCase : str = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Tuple = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Dict = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
_UpperCamelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
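# Effect of the lazy pattern above (a sketch): importing the package stays
# cheap because `_LazyModule` resolves names from `_import_structure` only on
# first attribute access, e.g.
#   from transformers.models.gpt_neo import GPTNeoConfig  # loads the config module only now
# and, when the torch or flax extras are missing, the guarded model names are
# simply never registered.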
| 645
| 1
|
"""simple docstring"""
def solution():
    '''simple docstring'''
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
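    # Optional cross-check of solution() via the standard library (a sketch,
    # not part of the original algorithm): date.weekday() == 6 marks a Sunday.
    from datetime import date
    assert solution() == sum(
        date(year, month, 1).weekday() == 6
        for year in range(1901, 2001)
        for month in range(1, 13)
    )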
| 645
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self , a ) -> str:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
lowercase__ : str = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(a )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = 'sshleifer/tiny-gpt2'
lowercase__ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , )
lowercase__ : str = TensorFlowBenchmark(a )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : List[str] = 'sgugger/tiny-distilbert-classification'
lowercase__ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , only_pretrain_model=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Optional[int] = 'sshleifer/tiny-gpt2'
lowercase__ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
lowercase__ : List[Any] = AutoConfig.from_pretrained(a )
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , )
lowercase__ : Tuple = TensorFlowBenchmark(a , [config] )
lowercase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : List[str] = AutoConfig.from_pretrained(a )
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : List[str] = TensorFlowBenchmark(a , [config] )
lowercase__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : Optional[Any] = TensorFlowBenchmark(a )
lowercase__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2'
lowercase__ : Optional[int] = AutoConfig.from_pretrained(a )
lowercase__ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : str = TensorFlowBenchmark(a , [config] )
lowercase__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[str] = 'patrickvonplaten/t5-tiny-random'
lowercase__ : Any = AutoConfig.from_pretrained(a )
lowercase__ : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , )
lowercase__ : int = TensorFlowBenchmark(a , configs=[config] )
lowercase__ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a , multi_process=a , )
lowercase__ : Any = TensorFlowBenchmark(a )
lowercase__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Any = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a , save_to_csv=a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(a , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(a , 'env.csv' ) , multi_process=a , )
lowercase__ : Union[str, Any] = TensorFlowBenchmark(a )
benchmark.run()
self.assertTrue(Path(os.path.join(a , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(a , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(a , 'env.csv' ) ).exists() )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Tuple = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(a ):
self.assertTrue(hasattr(a , 'sequential' ) )
self.assertTrue(hasattr(a , 'cumulative' ) )
self.assertTrue(hasattr(a , 'current' ) )
self.assertTrue(hasattr(a , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a , 'log.txt' ) , log_print=a , trace_memory_line_by_line=a , eager_mode=a , multi_process=a , )
lowercase__ : Optional[int] = TensorFlowBenchmark(a )
lowercase__ : Optional[Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(a , 'log.txt' ) ).exists() )
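# Common shape of every test above (a summary): build
# TensorFlowBenchmarkArguments, wrap them in TensorFlowBenchmark, call run(),
# then assert only that the per-model result dicts are non-empty -- the suite
# guards the benchmarking plumbing, not absolute timings or memory numbers.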
| 645
| 1
|
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    def __init__(self , vqvae: VQModel , unet: UNet2DModel , scheduler: DDIMScheduler ) -> None:
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__(self , batch_size = 1 , generator = None , eta = 0.0 , num_inference_steps = 5_0 , output_type = "pil" , return_dict = True , **kwargs , ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=generator , )
        latents = latents.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps )
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs['eta'] = eta
        for t in self.progress_bar(self.scheduler.timesteps ):
            latent_model_input = self.scheduler.scale_model_input(latents , t )
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction , t , latents , **extra_kwargs ).prev_sample
        # decode the image latents with the VAE
        image = self.vqvae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
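# A minimal usage sketch (the checkpoint name is illustrative -- any repo with
# compatible `vqvae`, `unet` and `scheduler` components should work):
#
#   pipe = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256')
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]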
| 645
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class UpperCAmelCase_ ( _a):
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=False , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Any:
lowercase__ : Tuple = parent
lowercase__ : List[Any] = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : List[Any] = is_training
lowercase__ : Optional[Any] = use_input_mask
lowercase__ : Optional[int] = use_token_type_ids
lowercase__ : int = use_labels
lowercase__ : Tuple = vocab_size
lowercase__ : int = hidden_size
lowercase__ : Any = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : Optional[Any] = intermediate_size
lowercase__ : Optional[Any] = hidden_act
lowercase__ : List[str] = hidden_dropout_prob
lowercase__ : List[Any] = attention_probs_dropout_prob
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : List[str] = type_vocab_size
lowercase__ : Tuple = type_sequence_label_size
lowercase__ : List[Any] = initializer_range
lowercase__ : str = num_labels
lowercase__ : Tuple = num_choices
lowercase__ : str = scope
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = None
if self.use_input_mask:
lowercase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Dict = None
lowercase__ : Optional[Any] = None
lowercase__ : int = None
if self.use_labels:
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : List[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self ) -> Optional[int]:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict:
lowercase__ : Tuple = DistilBertModel(config=a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , a )
lowercase__ : str = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict:
lowercase__ : Optional[int] = DistilBertForMaskedLM(config=a )
model.to(a )
model.eval()
lowercase__ : Union[str, Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> int:
lowercase__ : Tuple = DistilBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
lowercase__ : Tuple = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> List[str]:
lowercase__ : int = self.num_labels
lowercase__ : Dict = DistilBertForSequenceClassification(a )
model.to(a )
model.eval()
lowercase__ : Optional[Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Any:
lowercase__ : Any = self.num_labels
lowercase__ : List[str] = DistilBertForTokenClassification(config=a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Tuple:
lowercase__ : List[Any] = self.num_choices
lowercase__ : Any = DistilBertForMultipleChoice(config=a )
model.to(a )
model.eval()
lowercase__ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : int = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Union[str, Any] = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__)) : List[str] = config_and_inputs
lowercase__ : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : List[str] = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__ : str = (
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : Any = True
lowerCamelCase__ : List[Any] = True
lowerCamelCase__ : Optional[Any] = True
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : str = DistilBertModelTester(self )
lowercase__ : int = ConfigTester(self , config_class=a , dim=3_7 )
def _UpperCAmelCase ( self ) -> Dict:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = DistilBertModel.from_pretrained(a )
self.assertIsNotNone(a )
@slow
@require_torch_gpu
def _UpperCAmelCase ( self ) -> Any:
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
lowercase__ : Optional[int] = True
lowercase__ : Union[str, Any] = model_class(config=a )
lowercase__ : int = self._prepare_for_class(a , a )
lowercase__ : Tuple = torch.jit.trace(
a , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , 'traced_model.pt' ) )
lowercase__ : Optional[int] = torch.jit.load(os.path.join(a , 'traced_model.pt' ) , map_location=a )
loaded(inputs_dict['input_ids'].to(a ) , inputs_dict['attention_mask'].to(a ) )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : int = DistilBertModel.from_pretrained('distilbert-base-uncased' )
lowercase__ : Union[str, Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowercase__ : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase__ : Optional[Any] = model(a , attention_mask=a )[0]
lowercase__ : Tuple = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , a )
lowercase__ : List[Any] = torch.tensor(
[[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
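# Sanity note on the integration check above: the input carries 11 token ids,
# so the base model's last hidden state has shape (batch=1, seq_len=11,
# hidden=768), matching the expected torch.Size((1, 11, 768)).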
| 645
| 1
|
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCamelCase : Union[str, Any] = get_tests_dir("fixtures/test_sentencepiece.model")
_UpperCamelCase : Union[str, Any] = {"target_lang": "fi", "source_lang": "en"}
_UpperCamelCase : str = ">>zh<<"
_UpperCamelCase : Any = "Helsinki-NLP/"
if is_torch_available():
_UpperCamelCase : int = "pt"
elif is_tf_available():
_UpperCamelCase : Tuple = "tf"
else:
_UpperCamelCase : str = "jax"
@require_sentencepiece
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : List[Any] = MarianTokenizer
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : str = True
def _UpperCAmelCase ( self ) -> Tuple:
super().setUp()
lowercase__ : Tuple = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
lowercase__ : Optional[int] = dict(zip(a , range(len(a ) ) ) )
lowercase__ : Tuple = Path(self.tmpdirname )
save_json(a , save_dir / VOCAB_FILES_NAMES['vocab'] )
save_json(a , save_dir / VOCAB_FILES_NAMES['tokenizer_config_file'] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(a , save_dir / VOCAB_FILES_NAMES['source_spm'] )
copyfile(a , save_dir / VOCAB_FILES_NAMES['target_spm'] )
lowercase__ : Dict = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def _UpperCAmelCase ( self , **a ) -> MarianTokenizer:
return MarianTokenizer.from_pretrained(self.tmpdirname , **a )
def _UpperCAmelCase ( self , a ) -> Any:
return (
"This is a test",
"This is a test",
)
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : Dict = '</s>'
lowercase__ : Union[str, Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '</s>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '<pad>' )
self.assertEqual(len(a ) , 9 )
def _UpperCAmelCase ( self ) -> str:
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : List[str] = MarianTokenizer.from_pretrained(f"""{ORG_NAME}opus-mt-en-de""" )
lowercase__ : Optional[int] = en_de_tokenizer(['I am a small frog'] , return_tensors=a )
self.assertIsInstance(a , a )
lowercase__ : List[Any] = [3_8, 1_2_1, 1_4, 6_9_7, 3_8_8_4_8, 0]
self.assertListEqual(a , batch.input_ids[0] )
lowercase__ : Any = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(a )
lowercase__ : Any = [x.name for x in Path(a ).glob('*' )]
self.assertIn('source.spm' , a )
MarianTokenizer.from_pretrained(a )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Union[str, Any] = self.get_tokenizer()
lowercase__ : Tuple = tok(
['I am a small frog' * 1_0_0_0, 'I am a small frog'] , padding=a , truncation=a , return_tensors=a )
self.assertIsInstance(a , a )
self.assertEqual(batch.input_ids.shape , (2, 5_1_2) )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : List[str] = self.get_tokenizer()
lowercase__ : Optional[Any] = tok(['I am a tiny frog', 'I am a small frog'] , padding=a , return_tensors=a )
self.assertIsInstance(a , a )
self.assertEqual(batch_smaller.input_ids.shape , (2, 1_0) )
@slow
def _UpperCAmelCase ( self ) -> List[Any]:
# fmt: off
lowercase__ : Any = {'input_ids': [[4_3_4_9_5, 4_6_2, 2_0, 4_2_1_6_4, 1_3_6_9, 5_2, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 7_4_9_1, 3_8_9_9_9, 6, 8, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 4_6_6_9, 3_7_8_6_7, 1_3, 7_5_2_5, 2_7, 1_5_9_3, 9_8_8, 1_3, 3_3_9_7_2, 7_0_2_9, 6, 2_0, 8_2_5_1, 3_8_3, 2, 2_7_0, 5_8_6_6, 3_7_8_8, 2, 2_3_5_3, 8_2_5_1, 1_2_3_3_8, 2, 1_3_9_5_8, 3_8_7, 2, 3_6_2_9, 6_9_5_3, 1_8_8, 2_9_0_0, 2, 1_3_9_5_8, 8_0_1_1, 1_1_5_0_1, 2_3, 8_4_6_0, 4_0_7_3, 3_4_0_0_9, 2_0, 4_3_5, 1_1_4_3_9, 2_7, 8, 8_4_6_0, 4_0_7_3, 6_0_0_4, 2_0, 9_9_8_8, 3_7_5, 2_7, 3_3, 2_6_6, 1_9_4_5, 1_0_7_6, 1_3_5_0, 3_7_8_6_7, 3_2_8_8, 5, 5_7_7, 1_0_7_6, 4_3_7_4, 8, 5_0_8_2, 5, 2_6_4_5_3, 2_5_7, 5_5_6, 4_0_3, 2, 2_4_2, 1_3_2, 3_8_3, 3_1_6, 4_9_2, 8, 1_0_7_6_7, 6, 3_1_6, 3_0_4, 4_2_3_9, 3, 0], [1_4_8, 1_5_7_2_2, 1_9, 1_8_3_9, 1_2, 1_3_5_0, 1_3, 2_2_3_2_7, 5_0_8_2, 5_4_1_8, 4_7_5_6_7, 3_5_9_3_8, 5_9, 3_1_8, 1_9_5_5_2, 1_0_8, 2_1_8_3, 5_4, 1_4_9_7_6, 4_8_3_5, 3_2, 5_4_7, 1_1_1_4, 8, 3_1_5, 2_4_1_7, 5, 9_2, 1_9_0_8_8, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0], [3_6, 6_3_9_5, 1_2_5_7_0, 3_9_1_4_7, 1_1_5_9_7, 6, 2_6_6, 4, 4_5_4_0_5, 7_2_9_6, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a , model_name='Helsinki-NLP/opus-mt-en-de' , revision='1a8c2263da11e68e50938f97e10cd57820bd504c' , decode_kwargs={'use_source_tokenizer': True} , )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : List[Any] = MarianTokenizer.from_pretrained('hf-internal-testing/test-marian-two-vocabs' )
lowercase__ : Any = 'Tämä on testi'
lowercase__ : Optional[int] = 'This is a test'
lowercase__ : Union[str, Any] = [7_6, 7, 2_0_4_7, 2]
lowercase__ : Optional[int] = [6_9, 1_2, 1_1, 9_4_0, 2]
lowercase__ : List[str] = tokenizer(a ).input_ids
self.assertListEqual(a , a )
lowercase__ : List[str] = tokenizer(text_target=a ).input_ids
self.assertListEqual(a , a )
lowercase__ : Optional[Any] = tokenizer.decode(a , skip_special_tokens=a )
self.assertEqual(a , a )
| 645
|
"""simple docstring"""
from __future__ import annotations
def a_ ( _lowerCAmelCase : float , _lowerCAmelCase : float , _lowerCAmelCase : float , ):
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
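# Usage sketch (added; values are illustrative and `shear_stress` is the assumed
# de-obfuscated name of the function defined above as `a_`). Exactly one argument
# must be 0; the function solves for it via stress = tangential_force / area:
#
#   shear_stress(stress=25, tangential_force=100, area=0)     # ('area', 4.0)
#   shear_stress(stress=0, tangential_force=1600, area=200)   # ('stress', 8.0)
#   shear_stress(stress=1000, tangential_force=0, area=1200)  # ('tangential_force', 1200000)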
| 645
| 1
|
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
_UpperCamelCase : Any = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def a_ ( _lowerCAmelCase : Optional[Any]=True ):
'''simple docstring'''
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_a))
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : str = None
lowerCamelCase__ : Optional[Any] = None
def _UpperCAmelCase ( self , a , a ) -> List[Any]:
with TemporaryDirectory() as tmp_dir:
lowercase__ : List[str] = dataset_module_factory(a , cache_dir=a )
lowercase__ : List[Any] = import_main_class(dataset_module.module_path , dataset=a )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=a , config_name=a , hash=dataset_module.hash , )
lowercase__ : Union[str, Any] = '/'.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=a ).replace(os.sep , '/' ),
config.DATASET_INFO_FILENAME,
] )
lowercase__ : Union[str, Any] = cached_path(a , cache_dir=a )
self.assertTrue(os.path.exists(a ) )
@pytest.mark.integration
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : Union[str, Any] = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple'
lowercase__ : int = dataset_module_factory('wikipedia' , cache_dir=_lowerCAmelCase )
lowercase__ : Optional[int] = import_main_class(dataset_module.module_path )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=_lowerCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
lowercase__ : Optional[int] = None
builder_instance.download_and_prepare()
lowercase__ : Optional[int] = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def a_ ( _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Optional[int] = dataset_module_factory('wikipedia' , cache_dir=_lowerCAmelCase )
lowercase__ : List[str] = import_main_class(dataset_module.module_path , dataset=_lowerCAmelCase )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=_lowerCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
lowercase__ : Union[str, Any] = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert "train" in ds
assert isinstance(ds['train'] , _lowerCAmelCase )
assert next(iter(ds['train'] ) )
| 645
|
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=[3_0, 3_0] , a=2 , a=3 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=3 , a=None , a=8 , a=1_0 , ) -> Any:
lowercase__ : List[str] = parent
lowercase__ : Optional[Any] = batch_size
lowercase__ : Optional[int] = image_size
lowercase__ : List[Any] = patch_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : str = is_training
lowercase__ : Optional[Any] = use_labels
lowercase__ : Optional[Any] = hidden_size
lowercase__ : Dict = num_hidden_layers
lowercase__ : Optional[Any] = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : List[Any] = hidden_act
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : Any = attention_probs_dropout_prob
lowercase__ : Any = type_sequence_label_size
lowercase__ : Dict = initializer_range
lowercase__ : Union[str, Any] = num_labels
lowercase__ : Tuple = scope
lowercase__ : Tuple = n_targets
lowercase__ : Optional[int] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
lowercase__ : Optional[Any] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
lowercase__ : Tuple = num_patches + 1 + self.num_detection_tokens
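        # Worked example with the defaults above: image_size=[30, 30] and patch_size=2
        # give (30 // 2) * (30 // 2) = 225 patches, so the expected sequence length is
        # 225 + 1 ([CLS]) + 10 detection tokens = 236.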
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowercase__ : Tuple = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowercase__ : int = []
for i in range(self.batch_size ):
lowercase__ : Optional[Any] = {}
lowercase__ : Any = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=a )
lowercase__ : List[str] = torch.rand(self.n_targets , 4 , device=a )
labels.append(a )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> List[Any]:
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def _UpperCAmelCase ( self , a , a , a ) -> int:
lowercase__ : List[str] = YolosModel(config=a )
model.to(a )
model.eval()
lowercase__ : List[Any] = model(a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a ) -> Union[str, Any]:
lowercase__ : str = YolosForObjectDetection(a )
model.to(a )
model.eval()
lowercase__ : Dict = model(pixel_values=a )
lowercase__ : Tuple = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
lowercase__ : str = model(pixel_values=a , labels=a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : int = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Any = config_and_inputs
lowercase__ : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Optional[int] = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCamelCase__ : List[str] = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
lowerCamelCase__ : List[Any] = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : Union[str, Any] = False
def _UpperCAmelCase ( self , a , a , a=False ) -> Dict:
lowercase__ : List[str] = super()._prepare_for_class(a , a , return_labels=a )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
lowercase__ : Optional[Any] = []
for i in range(self.model_tester.batch_size ):
lowercase__ : Dict = {}
lowercase__ : Dict = torch.ones(
size=(self.model_tester.n_targets,) , device=a , dtype=torch.long )
lowercase__ : Optional[Any] = torch.ones(
self.model_tester.n_targets , 4 , device=a , dtype=torch.float )
labels.append(a )
lowercase__ : Union[str, Any] = labels
return inputs_dict
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Dict = YolosModelTester(self )
lowercase__ : Optional[int] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 )
def _UpperCAmelCase ( self ) -> str:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
# YOLOS does not use inputs_embeds
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : int = model_class(a )
lowercase__ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Tuple = [*signature.parameters.keys()]
lowercase__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Dict = True
# in YOLOS, the seq_len is different
lowercase__ : Tuple = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = True
lowercase__ : str = False
lowercase__ : str = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Any = model(**self._prepare_for_class(a , a ) )
lowercase__ : str = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : Optional[int] = True
lowercase__ : List[Any] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : List[str] = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowercase__ : Dict = len(a )
# Check attention is always last and order is fine
lowercase__ : Any = True
lowercase__ : int = True
lowercase__ : int = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Any = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[Any] = 1
self.assertEqual(out_len + added_hidden_states , len(a ) )
lowercase__ : Tuple = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _UpperCAmelCase ( self ) -> List[str]:
def check_hidden_states_output(a , a , a ):
lowercase__ : str = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : int = model(**self._prepare_for_class(a , a ) )
lowercase__ : int = outputs.hidden_states
lowercase__ : Any = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(a ) , a )
# YOLOS has a different seq_length
lowercase__ : Optional[int] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Any = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : List[Any] = True
check_hidden_states_output(a , a , a )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*a )
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : int = YolosModel.from_pretrained(a )
self.assertIsNotNone(a )
def a_ ( ):
'''simple docstring'''
lowercase__ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return AutoImageProcessor.from_pretrained('hustvl/yolos-small' ) if is_vision_available() else None
@slow
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = YolosForObjectDetection.from_pretrained('hustvl/yolos-small' ).to(a )
lowercase__ : Tuple = self.default_image_processor
lowercase__ : Optional[int] = prepare_img()
lowercase__ : int = image_processor(images=a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : int = model(inputs.pixel_values )
# verify outputs
lowercase__ : Tuple = torch.Size((1, 1_0_0, 9_2) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ : Any = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] , device=a , )
lowercase__ : List[str] = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] , device=a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , a , atol=1e-4 ) )
# verify postprocessing
lowercase__ : Optional[Any] = image_processor.post_process_object_detection(
a , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
lowercase__ : str = torch.tensor([0.9_994, 0.9_790, 0.9_964, 0.9_972, 0.9_861] ).to(a )
lowercase__ : Any = [7_5, 7_5, 1_7, 6_3, 1_7]
lowercase__ : Optional[int] = torch.tensor([335.0_609, 79.3_848, 375.4_216, 187.2_495] ).to(a )
self.assertEqual(len(results['scores'] ) , 5 )
self.assertTrue(torch.allclose(results['scores'] , a , atol=1e-4 ) )
self.assertSequenceEqual(results['labels'].tolist() , a )
self.assertTrue(torch.allclose(results['boxes'][0, :] , a ) )
| 645
| 1
|
"""simple docstring"""
_UpperCamelCase : int = {
"meter": "m",
"kilometer": "km",
"megametre": "Mm",
"gigametre": "Gm",
"terametre": "Tm",
"petametre": "Pm",
"exametre": "Em",
"zettametre": "Zm",
"yottametre": "Ym",
}
# Exponent of the factor(meter)
_UpperCamelCase : Optional[Any] = {
"m": 0,
"km": 3,
"Mm": 6,
"Gm": 9,
"Tm": 12,
"Pm": 15,
"Em": 18,
"Zm": 21,
"Ym": 24,
}
def a_ ( _lowerCAmelCase : float , _lowerCAmelCase : str , _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : Dict = from_type.lower().strip('s' )
lowercase__ : str = to_type.lower().strip('s' )
lowercase__ : Tuple = UNIT_SYMBOL.get(_lowerCAmelCase , _lowerCAmelCase )
lowercase__ : Optional[Any] = UNIT_SYMBOL.get(_lowerCAmelCase , _lowerCAmelCase )
if from_sanitized not in METRIC_CONVERSION:
lowercase__ : Optional[Any] = (
f"""Invalid 'from_type' value: {from_type!r}.\n"""
f"""Conversion abbreviations are: {", ".join(_lowerCAmelCase )}"""
)
raise ValueError(_lowerCAmelCase )
if to_sanitized not in METRIC_CONVERSION:
lowercase__ : List[str] = (
f"""Invalid 'to_type' value: {to_type!r}.\n"""
f"""Conversion abbreviations are: {", ".join(_lowerCAmelCase )}"""
)
raise ValueError(_lowerCAmelCase )
lowercase__ : Optional[Any] = METRIC_CONVERSION[from_sanitized]
lowercase__ : str = METRIC_CONVERSION[to_sanitized]
lowercase__ : Union[str, Any] = 1
if from_exponent > to_exponent:
lowercase__ : int = from_exponent - to_exponent
else:
lowercase__ : Dict = -(to_exponent - from_exponent)
return value * pow(10 , _lowerCAmelCase )
if __name__ == "__main__":
from doctest import testmod
testmod()
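# Usage sketch (added; `length_conversion` is the assumed name of the function
# defined above as `a_`). The conversion multiplies by 10 raised to the difference
# of the two exponents in METRIC_CONVERSION:
#
#   length_conversion(4, "kilometer", "meter")      # 4 * 10**(3 - 0)  -> 4000
#   length_conversion(1, "meter", "kilometer")      # 1 * 10**(0 - 3)  -> 0.001
#   length_conversion(3, "megametre", "gigametre")  # 3 * 10**(6 - 9)  -> 0.003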
| 645
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
_UpperCamelCase : int = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase_ :
def __init__( self , a=False , a=False , a=6.0 , a=None , a=False , a=False , a=None , a="fp4" , a=False , **a , ) -> Tuple:
lowercase__ : str = load_in_abit
lowercase__ : str = load_in_abit
lowercase__ : List[str] = llm_inta_threshold
lowercase__ : Dict = llm_inta_skip_modules
lowercase__ : Tuple = llm_inta_enable_fpaa_cpu_offload
lowercase__ : Any = llm_inta_has_fpaa_weight
lowercase__ : Any = bnb_abit_quant_type
lowercase__ : Dict = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
lowercase__ : Dict = torch.floataa
elif isinstance(a , a ):
lowercase__ : Any = getattr(a , a )
elif isinstance(a , torch.dtype ):
lowercase__ : Any = bnb_abit_compute_dtype
else:
raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype' )
self.post_init()
def _UpperCAmelCase ( self ) -> str:
if not isinstance(self.llm_inta_threshold , a ):
raise ValueError('llm_int8_threshold must be a float' )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , a ):
raise ValueError('llm_int8_skip_modules must be a list of strings' )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , a ):
raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean' )
if not isinstance(self.llm_inta_has_fpaa_weight , a ):
raise ValueError('llm_int8_has_fp16_weight must be a boolean' )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError('bnb_4bit_compute_dtype must be torch.dtype' )
if not isinstance(self.bnb_abit_quant_type , a ):
raise ValueError('bnb_4bit_quant_type must be a string' )
if not isinstance(self.bnb_abit_use_double_quant , a ):
raise ValueError('bnb_4bit_use_double_quant must be a boolean' )
if self.load_in_abit and not version.parse(importlib.metadata.version('bitsandbytes' ) ) >= version.parse(
'0.39.0' ):
raise ValueError(
'4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version' )
def _UpperCAmelCase ( self ) -> Tuple:
return self.load_in_abit or self.load_in_abit
def _UpperCAmelCase ( self ) -> List[str]:
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def _UpperCAmelCase ( cls , a , a , **a ) -> Optional[Any]:
lowercase__ : List[Any] = cls(**a )
lowercase__ : Union[str, Any] = []
for key, value in kwargs.items():
if hasattr(a , a ):
setattr(a , a , a )
to_remove.append(a )
for key in to_remove:
kwargs.pop(a , a )
if return_unused_kwargs:
return config, kwargs
else:
return config
def _UpperCAmelCase ( self , a ) -> Dict:
with open(a , 'w' , encoding='utf-8' ) as writer:
lowercase__ : Any = self.to_dict()
lowercase__ : str = json.dumps(a , indent=2 , sort_keys=a ) + '\n'
writer.write(a )
def _UpperCAmelCase ( self ) -> Dict[str, Any]:
lowercase__ : Optional[Any] = copy.deepcopy(self.__dict__ )
lowercase__ : Any = str(output['bnb_4bit_compute_dtype'] ).split('.' )[1]
return output
def __repr__( self ) -> Dict:
return f"""{self.__class__.__name__} {self.to_json_string()}"""
def _UpperCAmelCase ( self , a = True ) -> str:
if use_diff is True:
lowercase__ : List[Any] = self.to_diff_dict()
else:
lowercase__ : List[str] = self.to_dict()
return json.dumps(a , indent=2 , sort_keys=a ) + "\n"
def _UpperCAmelCase ( self ) -> Dict[str, Any]:
lowercase__ : Tuple = self.to_dict()
# get the default config dict
lowercase__ : Optional[Any] = BitsAndBytesConfig().to_dict()
lowercase__ : int = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
lowercase__ : Optional[int] = value
return serializable_config_dict
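# A hedged usage sketch against the de-obfuscated transformers API this class
# corresponds to (BitsAndBytesConfig); parameter names follow the upstream library
# rather than the mangled identifiers above:
#
#   from transformers import BitsAndBytesConfig
#   config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
#   config.to_dict()       # full serialization
#   config.to_diff_dict()  # only the keys whose values differ from the defaults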
| 645
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCamelCase : Union[str, Any] = {
"configuration_blip_2": [
"BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Blip2Config",
"Blip2QFormerConfig",
"Blip2VisionConfig",
],
"processing_blip_2": ["Blip2Processor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Optional[int] = [
"BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Blip2Model",
"Blip2QFormerModel",
"Blip2PreTrainedModel",
"Blip2ForConditionalGeneration",
"Blip2VisionModel",
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
_UpperCamelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 645
|
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_UpperCamelCase : int = 16
_UpperCamelCase : Union[str, Any] = 32
def a_ ( _lowerCAmelCase : Tuple ):
'''simple docstring'''
return int(x / 2**20 )
class UpperCAmelCase_ :
def __enter__( self ) -> Union[str, Any]:
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
lowercase__ : List[str] = torch.cuda.memory_allocated()
return self
def __exit__( self , *a ) -> Any:
gc.collect()
torch.cuda.empty_cache()
lowercase__ : Optional[Any] = torch.cuda.memory_allocated()
lowercase__ : Union[str, Any] = torch.cuda.max_memory_allocated()
lowercase__ : List[Any] = bamb(self.end - self.begin )
lowercase__ : List[Any] = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def a_ ( _lowerCAmelCase : Accelerator , _lowerCAmelCase : int = 16 , _lowerCAmelCase : str = "bert-base-cased" , _lowerCAmelCase : int = 320 , _lowerCAmelCase : int = 160 , ):
'''simple docstring'''
lowercase__ : List[Any] = AutoTokenizer.from_pretrained(_lowerCAmelCase )
lowercase__ : Union[str, Any] = load_dataset(
'glue' , 'mrpc' , split={'train': f"""train[:{n_train}]""", 'validation': f"""validation[:{n_val}]"""} )
def tokenize_function(_lowerCAmelCase : int ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : List[str] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase__ : Union[str, Any] = datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_lowerCAmelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ : Union[str, Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_lowerCAmelCase : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowerCAmelCase , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(_lowerCAmelCase , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
lowercase__ : Dict = DataLoader(
tokenized_datasets['train'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
lowercase__ : Dict = DataLoader(
tokenized_datasets['validation'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
return train_dataloader, eval_dataloader
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : List[str] ):
'''simple docstring'''
lowercase__ : List[Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ : Optional[int] = config['lr']
lowercase__ : Optional[Any] = int(config['num_epochs'] )
lowercase__ : Optional[Any] = int(config['seed'] )
lowercase__ : int = int(config['batch_size'] )
lowercase__ : Union[str, Any] = args.model_name_or_path
set_seed(_lowerCAmelCase )
lowercase__ , lowercase__ : Tuple = get_dataloaders(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ : List[Any] = AutoModelForSequenceClassification.from_pretrained(_lowerCAmelCase , return_dict=_lowerCAmelCase )
# Instantiate optimizer
lowercase__ : List[Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowercase__ : Optional[Any] = optimizer_cls(params=model.parameters() , lr=_lowerCAmelCase )
if accelerator.state.deepspeed_plugin is not None:
lowercase__ : Optional[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
lowercase__ : List[Any] = 1
lowercase__ : List[Any] = (len(_lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowercase__ : Optional[int] = get_linear_schedule_with_warmup(
optimizer=_lowerCAmelCase , num_warmup_steps=0 , num_training_steps=_lowerCAmelCase , )
else:
lowercase__ : Tuple = DummyScheduler(_lowerCAmelCase , total_num_steps=_lowerCAmelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = accelerator.prepare(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# We need to keep track of how many total steps we have iterated over
lowercase__ : Optional[int] = 0
    # We also need to keep track of the starting epoch so files are named properly
lowercase__ : Tuple = 0
# Now we train the model
lowercase__ : Optional[Any] = {}
for epoch in range(_lowerCAmelCase , _lowerCAmelCase ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(_lowerCAmelCase ):
lowercase__ : List[Any] = model(**_lowerCAmelCase )
lowercase__ : Dict = outputs.loss
lowercase__ : int = loss / gradient_accumulation_steps
accelerator.backward(_lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print('Memory before entering the train: {}'.format(bamb(tracemalloc.begin ) ) )
accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) )
accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) )
accelerator.print(
'Total Peak Memory consumed during the train (max): {}'.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
lowercase__ : int = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
def a_ ( ):
'''simple docstring'''
lowercase__ : int = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=_lowerCAmelCase , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_lowerCAmelCase , )
parser.add_argument(
'--output_dir' , type=_lowerCAmelCase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--peak_memory_upper_bound' , type=_lowerCAmelCase , default=_lowerCAmelCase , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , )
parser.add_argument(
'--n_train' , type=_lowerCAmelCase , default=320 , help='Number of training examples to use.' , )
parser.add_argument(
'--n_val' , type=_lowerCAmelCase , default=160 , help='Number of validation examples to use.' , )
parser.add_argument(
'--num_epochs' , type=_lowerCAmelCase , default=1 , help='Number of train epochs.' , )
lowercase__ : Any = parser.parse_args()
lowercase__ : Optional[Any] = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(_lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
main()
| 645
| 1
|
"""simple docstring"""
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_UpperCamelCase : int = {
"tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
"tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
"base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
"base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
"small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
"small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
"medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
"medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
"large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
"large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def a_ ( _lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
lowercase__ : Any = ['layers', 'blocks']
for k in ignore_keys:
state_dict.pop(_lowerCAmelCase , _lowerCAmelCase )
_UpperCamelCase : Union[str, Any] = {
"blocks": "layers",
"mlp.0": "fc1",
"mlp.2": "fc2",
"mlp_ln": "final_layer_norm",
".attn.query": ".self_attn.q_proj",
".attn.key": ".self_attn.k_proj",
".attn.value": ".self_attn.v_proj",
".attn_ln": ".self_attn_layer_norm",
".attn.out": ".self_attn.out_proj",
".cross_attn.query": ".encoder_attn.q_proj",
".cross_attn.key": ".encoder_attn.k_proj",
".cross_attn.value": ".encoder_attn.v_proj",
".cross_attn_ln": ".encoder_attn_layer_norm",
".cross_attn.out": ".encoder_attn.out_proj",
"decoder.ln.": "decoder.layer_norm.",
"encoder.ln.": "encoder.layer_norm.",
"token_embedding": "embed_tokens",
"encoder.positional_embedding": "encoder.embed_positions.weight",
"decoder.positional_embedding": "decoder.embed_positions.weight",
"ln_post": "layer_norm",
}
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : Union[str, Any] = list(s_dict.keys() )
for key in keys:
lowercase__ : Dict = key
for k, v in WHISPER_MAPPING.items():
if k in key:
lowercase__ : Any = new_key.replace(_lowerCAmelCase , _lowerCAmelCase )
print(f"""{key} -> {new_key}""" )
lowercase__ : int = s_dict.pop(_lowerCAmelCase )
return s_dict
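# Illustration (hypothetical key, following WHISPER_MAPPING above): the OpenAI
# checkpoint key "encoder.blocks.0.mlp.0.weight" is rewritten by the substring
# replacements to the Transformers name "encoder.layers.0.fc1.weight".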
def a_ ( _lowerCAmelCase : List[str] ):
'''simple docstring'''
lowercase__ , lowercase__ : Optional[Any] = emb.weight.shape
lowercase__ : Dict = nn.Linear(_lowerCAmelCase , _lowerCAmelCase , bias=_lowerCAmelCase )
lowercase__ : List[str] = emb.weight.data
return lin_layer
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : str ):
'''simple docstring'''
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
lowercase__ : Any = os.path.basename(_lowerCAmelCase )
lowercase__ : Optional[int] = url.split('/' )[-2]
lowercase__ : Union[str, Any] = os.path.join(_lowerCAmelCase , _lowerCAmelCase )
if os.path.exists(_lowerCAmelCase ) and not os.path.isfile(_lowerCAmelCase ):
raise RuntimeError(f"""{download_target} exists and is not a regular file""" )
if os.path.isfile(_lowerCAmelCase ):
lowercase__ : Optional[Any] = open(_lowerCAmelCase , 'rb' ).read()
if hashlib.shaaaa(_lowerCAmelCase ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(f"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )
with urllib.request.urlopen(_lowerCAmelCase ) as source, open(_lowerCAmelCase , 'wb' ) as output:
with tqdm(
total=int(source.info().get('Content-Length' ) ) , ncols=80 , unit='iB' , unit_scale=_lowerCAmelCase , unit_divisor=1024 ) as loop:
while True:
lowercase__ : List[str] = source.read(8192 )
if not buffer:
break
output.write(_lowerCAmelCase )
loop.update(len(_lowerCAmelCase ) )
lowercase__ : Union[str, Any] = open(_lowerCAmelCase , 'rb' ).read()
if hashlib.shaaaa(_lowerCAmelCase ).hexdigest() != expected_shaaaa:
raise RuntimeError(
            'Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.' )
return model_bytes
def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] ):
'''simple docstring'''
if ".pt" not in checkpoint_path:
lowercase__ : Dict = _download(_MODELS[checkpoint_path] )
else:
lowercase__ : str = torch.load(_lowerCAmelCase , map_location='cpu' )
lowercase__ : List[str] = original_checkpoint['dims']
lowercase__ : Dict = original_checkpoint['model_state_dict']
lowercase__ : Optional[Any] = state_dict['decoder.token_embedding.weight']
remove_ignore_keys_(_lowerCAmelCase )
rename_keys(_lowerCAmelCase )
lowercase__ : List[str] = True
lowercase__ : List[Any] = state_dict['decoder.layers.0.fc1.weight'].shape[0]
lowercase__ : str = WhisperConfig(
        vocab_size=dimensions['n_vocab'] , encoder_ffn_dim=_lowerCAmelCase , decoder_ffn_dim=_lowerCAmelCase , num_mel_bins=dimensions['n_mels'] , d_model=dimensions['n_audio_state'] , max_target_positions=dimensions['n_text_ctx'] , encoder_layers=dimensions['n_audio_layer'] , encoder_attention_heads=dimensions['n_audio_head'] , decoder_layers=dimensions['n_text_layer'] , decoder_attention_heads=dimensions['n_text_head'] , max_source_positions=dimensions['n_audio_ctx'] , )
lowercase__ : Optional[Any] = WhisperForConditionalGeneration(_lowerCAmelCase )
lowercase__ , lowercase__ : List[Any] = model.model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0 and not set(_lowerCAmelCase ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
f""" but all the following weights are missing {missing}""" )
if tie_embeds:
lowercase__ : Union[str, Any] = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
lowercase__ : List[str] = proj_out_weights
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_UpperCamelCase : int = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
_UpperCamelCase : List[Any] = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 645
|
"""simple docstring"""
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : Any = [0] * len(_lowerCAmelCase )
for i in range(1 , len(_lowerCAmelCase ) ):
# use last results for better performance - dynamic programming
lowercase__ : List[str] = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
lowercase__ : Dict = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
lowercase__ : Union[str, Any] = j
return prefix_result
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
return max(prefix_function(_lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
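# Illustrative values (added; `prefix_function` / `longest_prefix` are the assumed
# names of the two functions defined above as `a_`): for "aabaaab",
# prefix_function returns [0, 1, 0, 1, 2, 2, 3] -- each entry is the length of the
# longest proper prefix that is also a suffix of the prefix ending there, i.e. the
# failure table used by Knuth-Morris-Pratt matching. longest_prefix of the same
# string is therefore 3.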
| 645
| 1
|
"""simple docstring"""
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_UpperCamelCase : int = "\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n"
_UpperCamelCase : str = "\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.\n"
_UpperCamelCase : List[str] = r"\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting \"1/2\" to \"\\frac{1}{2}\")\n\nExamples:\n >>> metric = datasets.load_metric(\"competition_math\")\n >>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])\n >>> print(results)\n {'accuracy': 1.0}\n"
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/hendrycks/math' , codebase_urls=['https://github.com/hendrycks/math'] , )
def _UpperCAmelCase ( self , a , a ) -> List[str]:
lowercase__ : Optional[int] = 0.0
for i, j in zip(a , a ):
n_correct += 1.0 if math_equivalence.is_equiv(a , a ) else 0.0
lowercase__ : List[str] = n_correct / len(a )
return {
"accuracy": accuracy,
}
| 645
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self , a , a=7 , a=3 , a=1_8 , a=3_0 , a=4_0_0 , a=True , a=None , a=True , a=None , a=True , ) -> List[str]:
lowercase__ : Tuple = size if size is not None else {'shortest_edge': 2_0}
lowercase__ : Union[str, Any] = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8}
lowercase__ : Optional[int] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : str = num_channels
lowercase__ : Any = image_size
lowercase__ : Optional[Any] = min_resolution
lowercase__ : int = max_resolution
lowercase__ : List[Any] = do_resize
lowercase__ : List[str] = size
lowercase__ : str = do_center_crop
lowercase__ : List[Any] = crop_size
lowercase__ : Union[str, Any] = do_flip_channel_order
def _UpperCAmelCase ( self ) -> int:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Optional[Any] = MobileViTImageProcessor if is_vision_available() else None
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Tuple = MobileViTImageProcessingTester(self )
@property
def _UpperCAmelCase ( self ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , 'do_resize' ) )
self.assertTrue(hasattr(a , 'size' ) )
self.assertTrue(hasattr(a , 'do_center_crop' ) )
self.assertTrue(hasattr(a , 'center_crop' ) )
self.assertTrue(hasattr(a , 'do_flip_channel_order' ) )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 2_0} )
self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8} )
lowercase__ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {'shortest_edge': 4_2} )
self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4} )
def _UpperCAmelCase ( self ) -> Tuple:
pass
def _UpperCAmelCase ( self ) -> str:
# Initialize image_processing
lowercase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : List[Any] = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _UpperCAmelCase ( self ) -> Tuple:
# Initialize image_processing
lowercase__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
lowercase__ : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : Any = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _UpperCAmelCase ( self ) -> Dict:
# Initialize image_processing
lowercase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : Tuple = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 645
| 1
|
"""simple docstring"""
from math import sqrt
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (
number >= 0
), "'number' must been an int and positive"
lowercase__ : List[str] = True
# 0 and 1 are none primes.
if number <= 1:
lowercase__ : int = False
for divisor in range(2 , int(round(sqrt(_lowerCAmelCase ) ) ) + 1 ):
        # if 'number' is divisible by 'divisor', set 'status' to False
        # and break out of the loop.
if number % divisor == 0:
lowercase__ : List[str] = False
break
# precondition
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'status' must been from type bool"
return status
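# Quick sanity values for the trial-division test above (illustrative):
#   is_prime(2) -> True, is_prime(9) -> False, is_prime(97) -> True
# (`is_prime` is the name the rest of this file uses for the function.)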
def a_ ( _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
    assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (n > 2), "'N' must be an int and > 2"
    # begin_list: contains all natural numbers from 2 up to N
    lowercase__ : int = list(range(2 , n + 1 ) )
    lowercase__ : Dict = [] # this list will be returned.
    # actual sieve of Eratosthenes
for i in range(len(_lowerCAmelCase ) ):
for j in range(i + 1 , len(_lowerCAmelCase ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowercase__ : Tuple = 0
# filters actual prime numbers.
lowercase__ : List[Any] = [x for x in begin_list if x != 0]
# precondition
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'ans' must been from type list"
return ans
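# For example (illustrative), the sieve above applied to n=20 yields
# [2, 3, 5, 7, 11, 13, 17, 19]: every multiple of an earlier surviving entry is
# zeroed out before the final filter keeps the non-zero values.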
def a_ ( _lowerCAmelCase : Any ):
'''simple docstring'''
    assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (n > 2), "'N' must be an int and > 2"
lowercase__ : List[Any] = []
    # iterate over all numbers from 2 up to and including N;
    # if a number is prime, append it to the list 'ans'
for number in range(2 , n + 1 ):
if is_prime(_lowerCAmelCase ):
ans.append(_lowerCAmelCase )
# precondition
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'ans' must been from type list"
return ans
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
    assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and number >= 0, "'number' must be an int and >= 0"
    lowercase__ : Any = [] # this list will be returned by the function.
# potential prime number factors.
lowercase__ : List[Any] = 2
lowercase__ : List[str] = number
if number == 0 or number == 1:
ans.append(_lowerCAmelCase )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(_lowerCAmelCase ):
while quotient != 1:
if is_prime(_lowerCAmelCase ) and (quotient % factor == 0):
ans.append(_lowerCAmelCase )
quotient /= factor
else:
factor += 1
else:
ans.append(_lowerCAmelCase )
# precondition
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'ans' must been from type list"
return ans
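# Worked example of the intended factorization (assuming the upstream name
# prime_factorization): prime_factorization(60) divides out the smallest prime
# factor first and yields [2, 2, 3, 5], since 60 = 2 * 2 * 3 * 5.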
def a_ ( _lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase__ : List[str] = 0
# prime factorization of 'number'
lowercase__ : str = prime_factorization(_lowerCAmelCase )
lowercase__ : Dict = max(_lowerCAmelCase )
# precondition
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'ans' must been from type int"
return ans
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase__ : int = 0
# prime factorization of 'number'
lowercase__ : Any = prime_factorization(_lowerCAmelCase )
lowercase__ : str = min(_lowerCAmelCase )
# precondition
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'ans' must been from type int"
return ans
def a_ ( _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'number' must been an int"
assert isinstance(number % 2 == 0 , _lowerCAmelCase ), "compare must been from type bool"
return number % 2 == 0
def a_ ( _lowerCAmelCase : Any ):
'''simple docstring'''
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ), "'number' must been an int"
assert isinstance(number % 2 != 0 , _lowerCAmelCase ), "compare must been from type bool"
return number % 2 != 0
def a_ ( _lowerCAmelCase : Tuple ):
'''simple docstring'''
assert (
isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (number > 2) and is_even(_lowerCAmelCase )
), "'number' must been an int, even and > 2"
lowercase__ : Dict = [] # this list will be returned
# creates a list of prime numbers between 2 up to 'number'
lowercase__ : Optional[Any] = get_prime_numbers(_lowerCAmelCase )
lowercase__ : Optional[int] = len(_lowerCAmelCase )
# run variable for while-loops.
lowercase__ : List[Any] = 0
lowercase__ : Any = None
# exit variable. for break up the loops
lowercase__ : List[Any] = True
while i < len_pn and loop:
lowercase__ : Dict = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
lowercase__ : Optional[Any] = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(_lowerCAmelCase , _lowerCAmelCase )
and (len(_lowerCAmelCase ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
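# Worked example (assuming the upstream name goldbach): goldbach(28) scans
# prime pairs in index order and returns the first match, [5, 23],
# since 5 + 23 = 28 and both summands are prime.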
def a_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict ):
'''simple docstring'''
assert (
isinstance(_lowerCAmelCase , _lowerCAmelCase )
and isinstance(_lowerCAmelCase , _lowerCAmelCase )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
lowercase__ : List[Any] = 0
while numbera != 0:
lowercase__ : Union[str, Any] = numbera % numbera
lowercase__ : Any = numbera
lowercase__ : Any = rest
# precondition
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
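# Worked trace of the Euclidean algorithm this function implements:
# gcd(48, 18): 48 % 18 = 12, 18 % 12 = 6, 12 % 6 = 0 -> the gcd is 6.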
def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str ):
'''simple docstring'''
assert (
isinstance(_lowerCAmelCase , _lowerCAmelCase )
and isinstance(_lowerCAmelCase , _lowerCAmelCase )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
lowercase__ : Tuple = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
lowercase__ : int = prime_factorization(_lowerCAmelCase )
lowercase__ : Dict = prime_factorization(_lowerCAmelCase )
elif numbera == 1 or numbera == 1:
lowercase__ : Any = []
lowercase__ : Tuple = []
lowercase__ : Tuple = max(_lowerCAmelCase , _lowerCAmelCase )
lowercase__ : str = 0
lowercase__ : List[Any] = 0
lowercase__ : List[Any] = [] # captured numbers in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
lowercase__ : Tuple = prime_fac_a.count(_lowerCAmelCase )
lowercase__ : List[str] = prime_fac_a.count(_lowerCAmelCase )
for _ in range(max(_lowerCAmelCase , _lowerCAmelCase ) ):
ans *= n
else:
lowercase__ : Optional[Any] = prime_fac_a.count(_lowerCAmelCase )
for _ in range(_lowerCAmelCase ):
ans *= n
done.append(_lowerCAmelCase )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
lowercase__ : int = prime_fac_a.count(_lowerCAmelCase )
for _ in range(_lowerCAmelCase ):
ans *= n
done.append(_lowerCAmelCase )
# precondition
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
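# Worked example (assuming the upstream name kg_v): kg_v(8, 12) factors
# 8 = 2**3 and 12 = 2**2 * 3, keeps the larger exponent of each prime, and
# returns 2**3 * 3 = 24, the least common multiple.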
def a_ ( _lowerCAmelCase : List[Any] ):
'''simple docstring'''
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (n >= 0), "'number' must been a positive int"
lowercase__ : int = 0
lowercase__ : List[Any] = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(_lowerCAmelCase ):
ans += 1
# precondition
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and is_prime(
_lowerCAmelCase ), "'ans' must been a prime number and from type int"
return ans
def a_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
assert (
is_prime(_lowerCAmelCase ) and is_prime(_lowerCAmelCase ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
lowercase__ : List[str] = p_number_a + 1 # jump to the next number
lowercase__ : Union[str, Any] = [] # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not is_prime(_lowerCAmelCase ):
number += 1
while number < p_number_a:
ans.append(_lowerCAmelCase )
number += 1
# fetch the next prime number.
while not is_prime(_lowerCAmelCase ):
number += 1
# precondition
assert (
isinstance(_lowerCAmelCase , _lowerCAmelCase )
and ans[0] != p_number_a
and ans[len(_lowerCAmelCase ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains neither 'pNumber1' nor 'pNumber2'!
return ans
def a_ ( _lowerCAmelCase : Optional[int] ):
'''simple docstring'''
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (n >= 1), "'n' must been int and >= 1"
lowercase__ : Any = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(_lowerCAmelCase )
# precondition
assert ans[0] == 1 and ans[len(_lowerCAmelCase ) - 1] == n, "Error in function getDivisors(...)"
return ans
def a_ ( _lowerCAmelCase : Tuple ):
'''simple docstring'''
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (
number > 1
), "'number' must been an int and >= 1"
lowercase__ : int = get_divisors(_lowerCAmelCase )
# precondition
assert (
isinstance(_lowerCAmelCase , _lowerCAmelCase )
and (divisors[0] == 1)
and (divisors[len(_lowerCAmelCase ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
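# Worked example: for 6 the divisors are [1, 2, 3, 6]; the proper divisors sum
# to 1 + 2 + 3 = 6, so 6 is perfect. The next perfect number is 28
# (1 + 2 + 4 + 7 + 14 = 28).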
def a_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any ):
'''simple docstring'''
assert (
isinstance(_lowerCAmelCase , _lowerCAmelCase )
and isinstance(_lowerCAmelCase , _lowerCAmelCase )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
lowercase__ : Dict = gcd(abs(_lowerCAmelCase ) , abs(_lowerCAmelCase ) )
# precondition
assert (
isinstance(_lowerCAmelCase , _lowerCAmelCase )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
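# Worked example (assuming the upstream name simplify_fraction):
# simplify_fraction(10, 20) computes gcd(10, 20) = 10 and returns (1, 2).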
def a_ ( _lowerCAmelCase : Dict ):
'''simple docstring'''
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (n >= 0), "'n' must been a int and >= 0"
lowercase__ : Optional[Any] = 1 # this will be returned.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (n >= 0), "'n' must been an int and >= 0"
lowercase__ : Union[str, Any] = 0
lowercase__ : Optional[Any] = 1
lowercase__ : Union[str, Any] = 1 # this will be returned
for _ in range(n - 1 ):
lowercase__ : Optional[int] = ans
ans += fiba
lowercase__ : List[Any] = tmp
return ans
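# Worked trace (assuming the upstream name fib): with the 1-indexed convention
# above, fib(1) = 1, and fib(5) steps ans through 1 -> 1 -> 2 -> 3 -> 5,
# returning 5.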
| 645
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=True , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=4 , ) -> Dict:
lowercase__ : Optional[Any] = parent
lowercase__ : Dict = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : int = is_training
lowercase__ : str = use_attention_mask
lowercase__ : Dict = use_token_type_ids
lowercase__ : Optional[int] = use_labels
lowercase__ : Tuple = vocab_size
lowercase__ : List[str] = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : int = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : List[str] = hidden_act
lowercase__ : Dict = hidden_dropout_prob
lowercase__ : Tuple = attention_probs_dropout_prob
lowercase__ : List[str] = max_position_embeddings
lowercase__ : int = type_vocab_size
lowercase__ : List[str] = type_sequence_label_size
lowercase__ : Union[str, Any] = initializer_range
lowercase__ : Optional[int] = num_choices
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = None
if self.use_attention_mask:
lowercase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : List[str] = None
if self.use_token_type_ids:
lowercase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ : Any = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Optional[int] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs
lowercase__ : Union[str, Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Tuple = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Union[str, Any] = FlaxAlbertModelTester(self )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_class_name in self.all_model_classes:
lowercase__ : str = model_class_name.from_pretrained('albert-base-v2' )
lowercase__ : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(a )
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : str = FlaxAlbertModel.from_pretrained('albert-base-v2' )
lowercase__ : Optional[int] = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowercase__ : Optional[Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowercase__ : Any = model(a , attention_mask=a )[0]
lowercase__ : Tuple = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , a )
lowercase__ : Optional[Any] = np.array(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
| 645
| 1
|
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=7 , a=False , a=True , a=False , a=False , a=1_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Union[str, Any]:
lowercase__ : Optional[Any] = parent
lowercase__ : Optional[Any] = batch_size
lowercase__ : str = seq_length
lowercase__ : Optional[Any] = is_training
lowercase__ : int = use_input_mask
lowercase__ : List[str] = use_token_type_ids
lowercase__ : List[Any] = use_labels
lowercase__ : Any = vocab_size
lowercase__ : Optional[int] = hidden_size
lowercase__ : Tuple = num_hidden_layers
lowercase__ : Tuple = num_attention_heads
lowercase__ : str = intermediate_size
lowercase__ : Union[str, Any] = hidden_act
lowercase__ : Optional[Any] = hidden_dropout_prob
lowercase__ : Any = attention_probs_dropout_prob
lowercase__ : Any = max_position_embeddings
lowercase__ : str = type_vocab_size
lowercase__ : Union[str, Any] = type_sequence_label_size
lowercase__ : Dict = initializer_range
lowercase__ : Tuple = num_labels
lowercase__ : Any = num_choices
lowercase__ : List[Any] = scope
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : Optional[Any] = None
if self.use_input_mask:
lowercase__ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Optional[int] = None
lowercase__ : Any = None
lowercase__ : Optional[int] = None
if self.use_labels:
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : List[str] = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : Union[str, Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Tuple = EsmConfig(
vocab_size=3_3 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=a , esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False} , )
return config
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> str:
lowercase__ : List[str] = EsmForProteinFolding(config=a ).float()
model.to(a )
model.eval()
lowercase__ : Dict = model(a , attention_mask=a )
lowercase__ : List[str] = model(a )
lowercase__ : str = model(a )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 1_4, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : Optional[Any] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Optional[int] = config_and_inputs
lowercase__ : Any = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Any = False
lowerCamelCase__ : List[Any] = (EsmForProteinFolding,) if is_torch_available() else ()
lowerCamelCase__ : List[str] = ()
lowerCamelCase__ : Optional[int] = {} if is_torch_available() else {}
lowerCamelCase__ : Optional[Any] = False
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Optional[Any] = EsmFoldModelTester(self )
lowercase__ : Optional[int] = ConfigTester(self , config_class=a , hidden_size=3_7 )
def _UpperCAmelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
@unittest.skip('Does not support attention outputs' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
@unittest.skip
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@unittest.skip('Esm does not support embedding resizing' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
pass
@unittest.skip('Esm does not support embedding resizing' )
def _UpperCAmelCase ( self ) -> int:
pass
@unittest.skip('ESMFold does not support passing input embeds!' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def _UpperCAmelCase ( self ) -> List[str]:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def _UpperCAmelCase ( self ) -> Dict:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def _UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def _UpperCAmelCase ( self ) -> List[str]:
pass
@unittest.skip('ESMFold does not output hidden states in the normal way.' )
def _UpperCAmelCase ( self ) -> Dict:
pass
@unittest.skip('ESMFold does not output hidden states in the normal way.' )
def _UpperCAmelCase ( self ) -> str:
pass
@unittest.skip('ESMFold only has one output format.' )
def _UpperCAmelCase ( self ) -> int:
pass
@unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
pass
@unittest.skip('ESMFold does not support input chunking.' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.' )
def _UpperCAmelCase ( self ) -> str:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def _UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip('ESMFold doesn\'t support data parallel.' )
def _UpperCAmelCase ( self ) -> Dict:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> List[str]:
pass
@require_torch
class UpperCAmelCase_ ( _a):
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Optional[Any] = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1' ).float()
model.eval()
lowercase__ : str = torch.tensor([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
lowercase__ : Union[str, Any] = model(a )['positions']
lowercase__ : Any = torch.tensor([2.5_828, 0.7_993, -10.9_334] , dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , a , atol=1e-4 ) )
| 645
|
"""simple docstring"""
from collections.abc import Sequence
def a_ ( _lowerCAmelCase : Sequence[float] , _lowerCAmelCase : float ):
'''simple docstring'''
return sum(c * (x**i) for i, c in enumerate(_lowerCAmelCase ) )
def a_ ( _lowerCAmelCase : Sequence[float] , _lowerCAmelCase : float ):
'''simple docstring'''
lowercase__ : int = 0.0
for coeff in reversed(_lowerCAmelCase ):
lowercase__ : List[Any] = result * x + coeff
return result
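# Horner's rule rewrites the polynomial so evaluation needs only n
# multiply-adds instead of separate powers. For the sample coefficients below,
# p(x) = 5.0*x**2 + 9.3*x**3 + 7.0*x**4 factors as
# (((7.0*x + 9.3)*x + 5.0)*x + 0.0)*x + 0.0, so both functions print 79800.0
# at x = 10.0.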
if __name__ == "__main__":
_UpperCamelCase : int = (0.0, 0.0, 5.0, 9.3, 7.0)
_UpperCamelCase : Dict = 1_0.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 645
| 1
|
"""simple docstring"""
from math import isqrt, loga
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : Optional[Any] = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , _lowerCAmelCase , _lowerCAmelCase ):
lowercase__ : Optional[int] = False
return [i for i in range(2 , _lowerCAmelCase ) if is_prime[i]]
def a_ ( _lowerCAmelCase : int = 80_0800 , _lowerCAmelCase : int = 80_0800 ):
'''simple docstring'''
lowercase__ : Optional[Any] = degree * loga(_lowerCAmelCase )
lowercase__ : Tuple = int(_lowerCAmelCase )
lowercase__ : Dict = calculate_prime_numbers(_lowerCAmelCase )
lowercase__ : Dict = 0
lowercase__ : int = 0
lowercase__ : Tuple = len(_lowerCAmelCase ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
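# Why the two-pointer scan is valid: the condition p**q * q**p <= base**degree
# is equivalent, after taking log2 of both sides, to
# q*log2(p) + p*log2(q) <= degree*log2(base), which is exactly the bound
# checked above. For a fixed left prime p the admissible right primes form a
# prefix of the list, so `right` only ever moves left as `left` advances.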
if __name__ == "__main__":
print(f'''{solution() = }''')
| 645
|
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
_UpperCamelCase : Any = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def a_ ( _lowerCAmelCase : Optional[Any]=True ):
'''simple docstring'''
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_a))
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : str = None
lowerCamelCase__ : Optional[Any] = None
def _UpperCAmelCase ( self , a , a ) -> List[Any]:
with TemporaryDirectory() as tmp_dir:
lowercase__ : List[str] = dataset_module_factory(a , cache_dir=a )
lowercase__ : List[Any] = import_main_class(dataset_module.module_path , dataset=a )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=a , config_name=a , hash=dataset_module.hash , )
lowercase__ : Union[str, Any] = '/'.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=a ).replace(os.sep , '/' ),
config.DATASET_INFO_FILENAME,
] )
lowercase__ : Union[str, Any] = cached_path(a , cache_dir=a )
self.assertTrue(os.path.exists(a ) )
@pytest.mark.integration
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : Union[str, Any] = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple'
lowercase__ : int = dataset_module_factory('wikipedia' , cache_dir=_lowerCAmelCase )
lowercase__ : Optional[int] = import_main_class(dataset_module.module_path )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=_lowerCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
lowercase__ : Optional[int] = None
builder_instance.download_and_prepare()
lowercase__ : Optional[int] = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def a_ ( _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Optional[int] = dataset_module_factory('wikipedia' , cache_dir=_lowerCAmelCase )
lowercase__ : List[str] = import_main_class(dataset_module.module_path , dataset=_lowerCAmelCase )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=_lowerCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
lowercase__ : Union[str, Any] = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert "train" in ds
assert isinstance(ds['train'] , _lowerCAmelCase )
assert next(iter(ds['train'] ) )
| 645
| 1
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
_UpperCamelCase : Dict = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : Tuple = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
def __init__( self , **a ) -> Tuple:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
lowercase__ : Dict = deprecated_arg[3:]
lowercase__ : Tuple = not kwargs.pop(a )
logger.warning(
f"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"""
f""" {positive_arg}={kwargs[positive_arg]}""" )
lowercase__ : Optional[int] = kwargs.pop('tpu_name' , self.tpu_name )
lowercase__ : List[Any] = kwargs.pop('device_idx' , self.device_idx )
lowercase__ : Optional[int] = kwargs.pop('eager_mode' , self.eager_mode )
lowercase__ : Any = kwargs.pop('use_xla' , self.use_xla )
super().__init__(**a )
lowerCamelCase__ : str = field(
default=_a , metadata={"help": "Name of TPU"} , )
lowerCamelCase__ : int = field(
default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , )
lowerCamelCase__ : bool = field(default=_a , metadata={"help": "Benchmark models in eager mode."})
lowerCamelCase__ : bool = field(
default=_a , metadata={
"help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
} , )
@cached_property
def _UpperCAmelCase ( self ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self , ['tf'] )
lowercase__ : Any = None
if self.tpu:
try:
if self.tpu_name:
lowercase__ : List[str] = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
lowercase__ : List[str] = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
lowercase__ : List[str] = None
return tpu
@cached_property
def _UpperCAmelCase ( self ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self , ['tf'] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
lowercase__ : List[Any] = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , 'GPU' )
lowercase__ : int = tf.distribute.OneDeviceStrategy(device=f"""/gpu:{self.device_idx}""" )
else:
tf.config.set_visible_devices([] , 'GPU' ) # disable GPU
lowercase__ : str = tf.distribute.OneDeviceStrategy(device=f"""/cpu:{self.device_idx}""" )
return strategy
@property
def _UpperCAmelCase ( self ) -> bool:
requires_backends(self , ['tf'] )
return self._setup_tpu is not None
@property
def _UpperCAmelCase ( self ) -> "tf.distribute.Strategy":
requires_backends(self , ['tf'] )
return self._setup_strategy
@property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
requires_backends(self , ['tf'] )
return tf.config.list_physical_devices('GPU' )
@property
def _UpperCAmelCase ( self ) -> int:
requires_backends(self , ['tf'] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def _UpperCAmelCase ( self ) -> bool:
return self.n_gpu > 0
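# Minimal usage sketch (an assumption, not part of this file: the class is
# exported under its upstream name TensorFlowBenchmarkArguments). The cached
# strategy resolves to TPU, single-GPU, or CPU execution, so benchmarked model
# calls are expected to run inside its scope:
#
#   args = TensorFlowBenchmarkArguments(models=["bert-base-uncased"], device_idx=0)
#   with args.strategy.scope():
#       ...  # build and call the model here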
| 645
|
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def a_ ( _lowerCAmelCase : dict ):
'''simple docstring'''
return (data["data"], data["target"])
def a_ ( _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray ):
'''simple docstring'''
lowercase__ : Any = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(_lowerCAmelCase , _lowerCAmelCase )
# Predict target for test data
lowercase__ : str = xgb.predict(_lowerCAmelCase )
lowercase__ : Union[str, Any] = predictions.reshape(len(_lowerCAmelCase ) , 1 )
return predictions
def a_ ( ):
'''simple docstring'''
lowercase__ : Optional[Any] = fetch_california_housing()
lowercase__ , lowercase__ : str = data_handling(_lowerCAmelCase )
lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = train_test_split(
_lowerCAmelCase , _lowerCAmelCase , test_size=0.2_5 , random_state=1 )
lowercase__ : Any = xgboost(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Error printing
print(f"""Mean Absolute Error : {mean_absolute_error(_lowerCAmelCase , _lowerCAmelCase )}""" )
print(f"""Mean Square Error : {mean_squared_error(_lowerCAmelCase , _lowerCAmelCase )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 645
| 1
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ , lowercase__ : str = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-canny' , from_pt=a , dtype=jnp.bfloataa )
lowercase__ , lowercase__ : List[str] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
lowercase__ : List[Any] = controlnet_params
lowercase__ : int = 'bird'
lowercase__ : List[Any] = jax.device_count()
lowercase__ : Dict = pipe.prepare_text_inputs([prompts] * num_samples )
lowercase__ : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
lowercase__ : Optional[int] = pipe.prepare_image_inputs([canny_image] * num_samples )
lowercase__ : List[Any] = jax.random.PRNGKey(0 )
lowercase__ : Tuple = jax.random.split(a , jax.device_count() )
lowercase__ : str = replicate(a )
lowercase__ : List[str] = shard(a )
lowercase__ : Dict = shard(a )
lowercase__ : List[Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=5_0 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
lowercase__ : Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase__ : Tuple = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowercase__ : int = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ : Optional[Any] = jnp.array(
[0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ , lowercase__ : int = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-openpose' , from_pt=a , dtype=jnp.bfloataa )
lowercase__ , lowercase__ : Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
lowercase__ : Optional[Any] = controlnet_params
lowercase__ : List[Any] = 'Chef in the kitchen'
lowercase__ : List[str] = jax.device_count()
lowercase__ : Dict = pipe.prepare_text_inputs([prompts] * num_samples )
lowercase__ : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
lowercase__ : Optional[int] = pipe.prepare_image_inputs([pose_image] * num_samples )
lowercase__ : List[str] = jax.random.PRNGKey(0 )
lowercase__ : str = jax.random.split(a , jax.device_count() )
lowercase__ : Optional[Any] = replicate(a )
lowercase__ : Optional[Any] = shard(a )
lowercase__ : List[Any] = shard(a )
lowercase__ : List[Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=5_0 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
lowercase__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase__ : List[str] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowercase__ : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ : str = jnp.array(
[[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 645
|
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=1_0 , a=3 , a=2 , a=2 , a=2 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=0.9 , a=None , ) -> Optional[Any]:
lowercase__ : str = parent
lowercase__ : int = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : Dict = patch_size
lowercase__ : Tuple = tubelet_size
lowercase__ : Optional[int] = num_frames
lowercase__ : Optional[int] = is_training
lowercase__ : int = use_labels
lowercase__ : Optional[int] = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : Optional[int] = num_attention_heads
lowercase__ : Any = intermediate_size
lowercase__ : str = hidden_act
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Union[str, Any] = type_sequence_label_size
lowercase__ : List[Any] = initializer_range
lowercase__ : str = mask_ratio
lowercase__ : Optional[Any] = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
lowercase__ : Optional[Any] = (image_size // patch_size) ** 2
lowercase__ : str = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
lowercase__ : str = int(mask_ratio * self.seq_length )
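# With the defaults above (image_size=10, patch_size=2, num_frames=2,
# tubelet_size=2, mask_ratio=0.9) this gives (10 // 2) ** 2 = 25 patches per
# frame, (2 // 2) * 25 = 25 tokens in total, and int(0.9 * 25) = 22 masked tokens.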
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : int = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowercase__ : int = None
if self.use_labels:
lowercase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Dict = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Tuple:
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]:
lowercase__ : Dict = VideoMAEModel(config=a )
model.to(a )
model.eval()
lowercase__ : Tuple = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a ) -> Union[str, Any]:
lowercase__ : str = VideoMAEForPreTraining(a )
model.to(a )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase__ : Any = torch.ones((self.num_masks,) )
lowercase__ : str = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
lowercase__ : Optional[int] = mask.expand(self.batch_size , -1 ).bool()
lowercase__ : str = model(a , a )
# model only returns predictions for masked patches
lowercase__ : str = mask.sum().item()
lowercase__ : int = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Dict = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs
lowercase__ : List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Tuple = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
lowerCamelCase__ : Optional[int] = (
{"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ : Any = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Union[str, Any] = False
lowerCamelCase__ : str = False
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Optional[Any] = VideoMAEModelTester(self )
lowercase__ : Optional[Any] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 )
def _UpperCAmelCase ( self , a , a , a=False ) -> Optional[int]:
lowercase__ : Union[str, Any] = copy.deepcopy(a )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase__ : Optional[Any] = torch.ones((self.model_tester.num_masks,) )
lowercase__ : Any = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
lowercase__ : Any = mask.expand(self.model_tester.batch_size , -1 ).bool()
lowercase__ : Union[str, Any] = bool_masked_pos.to(a )
if return_labels:
if model_class in [
*get_values(a ),
]:
lowercase__ : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a )
return inputs_dict
def _UpperCAmelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='VideoMAE does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Dict:
pass
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : int = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
lowercase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : int = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[Any] = VideoMAEModel.from_pretrained(a )
self.assertIsNotNone(a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
if not self.has_attentions:
pass
else:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : str = True
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = self.model_tester.seq_length - self.model_tester.num_masks
lowercase__ : Any = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
lowercase__ : Optional[Any] = True
lowercase__ : int = False
lowercase__ : Any = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Optional[int] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Dict = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : str = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : List[Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[Any] = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowercase__ : List[str] = len(a )
# Check attention is always last and order is fine
lowercase__ : Optional[int] = True
lowercase__ : List[str] = True
lowercase__ : int = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : List[str] = model(**self._prepare_for_class(a , a ) )
self.assertEqual(out_len + 1 , len(a ) )
lowercase__ : int = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _UpperCAmelCase ( self ) -> Optional[int]:
def check_hidden_states_output(a , a , a ):
lowercase__ : Optional[int] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[int] = outputs.hidden_states
lowercase__ : List[Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(a ) , a )
lowercase__ : Optional[Any] = self.model_tester.seq_length - self.model_tester.num_masks
lowercase__ : Union[str, Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Tuple = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Union[str, Any] = True
check_hidden_states_output(a , a , a )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
def a_ ( ):
'''simple docstring'''
lowercase__ : int = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
lowercase__ : str = np.load(_lowerCAmelCase )
return list(_lowerCAmelCase )
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def _UpperCAmelCase ( self ) -> Optional[Any]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to(
a )
lowercase__ : str = self.default_image_processor
lowercase__ : List[str] = prepare_video()
lowercase__ : int = image_processor(a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**a )
# verify the logits
lowercase__ : str = torch.Size((1, 4_0_0) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ : List[Any] = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) )
@slow
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[int] = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(a )
lowercase__ : Optional[Any] = self.default_image_processor
lowercase__ : List[str] = prepare_video()
lowercase__ : str = image_processor(a , return_tensors='pt' ).to(a )
# add boolean mask, indicating which patches to mask
lowercase__ : Union[str, Any] = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
lowercase__ : str = torch.load(a )
# forward pass
with torch.no_grad():
lowercase__ : List[Any] = model(**a )
# verify the logits
lowercase__ : Dict = torch.Size([1, 1_4_0_8, 1_5_3_6] )
lowercase__ : List[str] = torch.tensor(
[[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=a )
self.assertEqual(outputs.logits.shape , a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
lowercase__ : List[Any] = torch.tensor([0.5_142] , device=a )
self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
lowercase__ : Tuple = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=a ).to(
a )
with torch.no_grad():
lowercase__ : Any = model(**a )
lowercase__ : List[Any] = torch.tensor([0.6_469] , device=a )
self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) )
| 645
| 1
|
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def a_ ( _lowerCAmelCase : dict ):
'''simple docstring'''
return (data["data"], data["target"])
def a_ ( _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray ):
'''simple docstring'''
lowercase__ : Any = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(_lowerCAmelCase , _lowerCAmelCase )
# Predict target for test data
lowercase__ : str = xgb.predict(_lowerCAmelCase )
lowercase__ : Union[str, Any] = predictions.reshape(len(_lowerCAmelCase ) , 1 )
return predictions
def a_ ( ):
'''simple docstring'''
lowercase__ : Optional[Any] = fetch_california_housing()
lowercase__ , lowercase__ : str = data_handling(_lowerCAmelCase )
lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = train_test_split(
_lowerCAmelCase , _lowerCAmelCase , test_size=0.2_5 , random_state=1 )
lowercase__ : Any = xgboost(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Error printing
print(f"""Mean Absolute Error : {mean_absolute_error(_lowerCAmelCase , _lowerCAmelCase )}""" )
print(f"""Mean Square Error : {mean_squared_error(_lowerCAmelCase , _lowerCAmelCase )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 645
|
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_UpperCamelCase : Dict = logging.get_logger(__name__)
_UpperCamelCase : List[Any] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
_UpperCamelCase : List[str] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ):
'''simple docstring'''
for attribute in key.split('.' ):
lowercase__ : Dict = getattr(_lowerCAmelCase , _lowerCAmelCase )
if weight_type is not None:
lowercase__ : Optional[int] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape
else:
lowercase__ : Optional[int] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
lowercase__ : Optional[Any] = value
elif weight_type == "weight_g":
lowercase__ : Dict = value
elif weight_type == "weight_v":
lowercase__ : List[str] = value
elif weight_type == "bias":
lowercase__ : Optional[Any] = value
else:
lowercase__ : List[str] = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Tuple = []
lowercase__ : List[str] = fairseq_model.state_dict()
lowercase__ : Union[str, Any] = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
lowercase__ : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == 'group' , )
lowercase__ : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
lowercase__ : List[Any] = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
lowercase__ : int = True
if "*" in mapped_key:
lowercase__ : Optional[int] = name.split(_lowerCAmelCase )[0].split('.' )[-2]
lowercase__ : List[str] = mapped_key.replace('*' , _lowerCAmelCase )
if "weight_g" in name:
lowercase__ : List[Any] = 'weight_g'
elif "weight_v" in name:
lowercase__ : int = 'weight_v'
elif "bias" in name:
lowercase__ : Dict = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase__ : Union[str, Any] = 'weight'
else:
lowercase__ : int = None
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def a_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Dict ):
'''simple docstring'''
lowercase__ : int = full_name.split('conv_layers.' )[-1]
lowercase__ : int = name.split('.' )
lowercase__ : int = int(items[0] )
lowercase__ : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
lowercase__ : Union[str, Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
lowercase__ : Optional[int] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
lowercase__ : List[Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
lowercase__ : int = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCAmelCase )
@torch.no_grad()
def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : str=None , _lowerCAmelCase : Tuple=True ):
'''simple docstring'''
if config_path is not None:
lowercase__ : Any = UniSpeechSatConfig.from_pretrained(_lowerCAmelCase )
else:
lowercase__ : Any = UniSpeechSatConfig()
lowercase__ : Union[str, Any] = ''
if is_finetuned:
lowercase__ : Optional[Any] = UniSpeechSatForCTC(_lowerCAmelCase )
else:
lowercase__ : List[Any] = UniSpeechSatForPreTraining(_lowerCAmelCase )
lowercase__ , lowercase__ , lowercase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
lowercase__ : Union[str, Any] = model[0].eval()
recursively_load_weights(_lowerCAmelCase , _lowerCAmelCase )
hf_wavavec.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_UpperCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCamelCase : str = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
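    # Illustrative invocation (added sketch; the script filename and paths are
    # hypothetical placeholders, the flags are the ones registered above):
    #   python convert_unispeech_sat_checkpoint.py \
    #       --checkpoint_path ./checkpoint.pt \
    #       --pytorch_dump_folder_path ./unispeech_sat_hf \
    #       --not_finetuned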
| 645
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
_UpperCamelCase : Union[str, Any] = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : Optional[Any] = "markuplm"
def __init__( self , a=3_0_5_2_2 , a=7_6_8 , a=1_2 , a=1_2 , a=3_0_7_2 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=2 , a=0.02 , a=1e-12 , a=0 , a=0 , a=2 , a=2_5_6 , a=1_0_2_4 , a=2_1_6 , a=1_0_0_1 , a=3_2 , a=5_0 , a="absolute" , a=True , a=None , **a , ) -> List[str]:
super().__init__(
pad_token_id=a , bos_token_id=a , eos_token_id=a , **a , )
lowercase__ : int = vocab_size
lowercase__ : Optional[int] = hidden_size
lowercase__ : Optional[Any] = num_hidden_layers
lowercase__ : Optional[Any] = num_attention_heads
lowercase__ : int = hidden_act
lowercase__ : Dict = intermediate_size
lowercase__ : Tuple = hidden_dropout_prob
lowercase__ : List[str] = attention_probs_dropout_prob
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : str = type_vocab_size
lowercase__ : List[Any] = initializer_range
lowercase__ : str = layer_norm_eps
lowercase__ : List[Any] = position_embedding_type
lowercase__ : Optional[int] = use_cache
lowercase__ : str = classifier_dropout
# additional properties
lowercase__ : Dict = max_depth
lowercase__ : Dict = max_xpath_tag_unit_embeddings
lowercase__ : List[str] = max_xpath_subs_unit_embeddings
lowercase__ : Union[str, Any] = tag_pad_id
lowercase__ : List[Any] = subs_pad_id
lowercase__ : int = xpath_unit_hidden_size
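        # Usage sketch (added; the keyword names follow the upstream MarkupLMConfig
        # and are an assumption here, since this dump strips parameter names to `a`):
        #   config = MarkupLMConfig(max_depth=64, xpath_unit_hidden_size=32)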
| 645
|
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=3_2 , a=2 , a=3 , a=1_6 , a=[1, 2, 1] , a=[2, 2, 4] , a=2 , a=2.0 , a=True , a=0.0 , a=0.0 , a=0.1 , a="gelu" , a=False , a=True , a=0.02 , a=1e-5 , a=True , a=None , a=True , a=1_0 , a=8 , a=["stage1", "stage2", "stage3"] , a=[1, 2, 3] , ) -> int:
lowercase__ : int = parent
lowercase__ : Union[str, Any] = batch_size
lowercase__ : Dict = image_size
lowercase__ : str = patch_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : List[str] = embed_dim
lowercase__ : Any = depths
lowercase__ : Dict = num_heads
lowercase__ : List[str] = window_size
lowercase__ : int = mlp_ratio
lowercase__ : Tuple = qkv_bias
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Tuple = drop_path_rate
lowercase__ : List[str] = hidden_act
lowercase__ : Optional[Any] = use_absolute_embeddings
lowercase__ : Optional[Any] = patch_norm
lowercase__ : Any = layer_norm_eps
lowercase__ : List[Any] = initializer_range
lowercase__ : List[str] = is_training
lowercase__ : int = scope
lowercase__ : Optional[int] = use_labels
lowercase__ : List[Any] = type_sequence_label_size
lowercase__ : List[str] = encoder_stride
lowercase__ : Optional[Any] = out_features
lowercase__ : Dict = out_indices
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = None
if self.use_labels:
lowercase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return MaskFormerSwinConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _UpperCAmelCase ( self , a , a , a ) -> Dict:
lowercase__ : Tuple = MaskFormerSwinModel(config=a )
model.to(a )
model.eval()
lowercase__ : str = model(a )
lowercase__ : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase__ : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]:
lowercase__ : List[Any] = MaskFormerSwinBackbone(config=a )
model.to(a )
model.eval()
lowercase__ : int = model(a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [1_3, 1_6, 1_6, 1_6] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4] )
# verify ValueError
with self.parent.assertRaises(a ):
lowercase__ : Dict = ['stem']
lowercase__ : List[str] = MaskFormerSwinBackbone(config=a )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : int = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs
lowercase__ : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Optional[int] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : List[str] = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
lowerCamelCase__ : str = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : int = False
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : str = MaskFormerSwinModelTester(self )
lowercase__ : Tuple = ConfigTester(self , config_class=a , embed_dim=3_7 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
def _UpperCAmelCase ( self ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCAmelCase ( self ) -> str:
return
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a )
@unittest.skip('Swin does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
@unittest.skip('Swin does not support feedforward chunking' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(a )
lowercase__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def _UpperCAmelCase ( self ) -> int:
pass
def _UpperCAmelCase ( self , a , a , a , a ) -> Tuple:
lowercase__ : Dict = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : str = model(**self._prepare_for_class(a , a ) )
lowercase__ : List[Any] = outputs.hidden_states
lowercase__ : str = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(a ) , a )
# Swin has a different seq_length
lowercase__ : Dict = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , a )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = 3
lowercase__ : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase__ : Tuple = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase__ : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : int = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def _UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def _UpperCAmelCase ( self ) -> Any:
pass
def _UpperCAmelCase ( self ) -> Any:
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(a ):
lowercase__ : Union[str, Any] = 0
return t
def check_equivalence(a , a , a , a={} ):
with torch.no_grad():
lowercase__ : Optional[Any] = model(**a , return_dict=a , **a )
lowercase__ : Optional[int] = model(**a , return_dict=a , **a ).to_tuple()
def recursive_check(a , a ):
if isinstance(a , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(a , a ):
recursive_check(a , a )
elif isinstance(a , a ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(a , a )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(a ) , set_nan_tensor_to_zero(a ) , atol=1e-5 ) , msg=(
'Tuple and dict output are not equal. Difference:'
f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
f""" {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}. Dict has"""
f""" `nan`: {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}."""
) , )
recursive_check(a , a )
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(a )
model.to(a )
model.eval()
lowercase__ : Tuple = self._prepare_for_class(a , a )
lowercase__ : Optional[Any] = self._prepare_for_class(a , a )
check_equivalence(a , a , a )
lowercase__ : Any = self._prepare_for_class(a , a , return_labels=a )
lowercase__ : List[Any] = self._prepare_for_class(a , a , return_labels=a )
check_equivalence(a , a , a )
lowercase__ : Any = self._prepare_for_class(a , a )
lowercase__ : int = self._prepare_for_class(a , a )
check_equivalence(a , a , a , {'output_hidden_states': True} )
lowercase__ : Dict = self._prepare_for_class(a , a , return_labels=a )
lowercase__ : Optional[int] = self._prepare_for_class(a , a , return_labels=a )
check_equivalence(a , a , a , {'output_hidden_states': True} )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase , _a):
lowerCamelCase__ : Dict = (MaskFormerSwinBackbone,) if is_torch_available() else ()
lowerCamelCase__ : Optional[int] = MaskFormerSwinConfig
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Optional[int] = MaskFormerSwinModelTester(self )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : int = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
lowercase__ : Optional[Any] = backbone_class(a )
backbone.to(a )
backbone.eval()
lowercase__ : Union[str, Any] = backbone(**a )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , a )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
lowercase__ : List[str] = backbone(**a , output_hidden_states=a )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size , h_seq_len , h_n_channels = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
lowercase__ : List[Any] = backbone(**a , output_attentions=a )
self.assertIsNotNone(outputs.attentions )
| 645
| 1
|
"""simple docstring"""
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : int = len(_lowerCAmelCase )
lowercase__ : int = len(_lowerCAmelCase )
lowercase__ : int = (
first_str_length if first_str_length > second_str_length else second_str_length
)
lowercase__ : list = []
for char_count in range(_lowerCAmelCase ):
if char_count < first_str_length:
output_list.append(first_str[char_count] )
if char_count < second_str_length:
output_list.append(second_str[char_count] )
return "".join(_lowerCAmelCase )
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
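    # Self-contained check (added sketch; the obfuscated helper above duplicates
    # parameter names, so it is not runnable as printed). zip_longest reproduces
    # the intended interleaving: "AB" + "XYZ" -> "AXBYZ".
    from itertools import zip_longest
    assert "".join(x + y for x, y in zip_longest("AB", "XYZ", fillvalue="")) == "AXBYZ"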
| 645
|
"""simple docstring"""
import math
def a_ ( _lowerCAmelCase : int = 100 ):
'''simple docstring'''
lowercase__ : Union[str, Any] = sum(i * i for i in range(1 , n + 1 ) )
lowercase__ : str = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
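    # Worked check (added sketch): for n = 10, (1 + ... + 10)**2 = 55**2 = 3025 and
    # 1**2 + ... + 10**2 = 385, so the expected difference is 3025 - 385 = 2640.
    assert sum(range(1, 11)) ** 2 - sum(i * i for i in range(1, 11)) == 2640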
| 645
| 1
|
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
_UpperCamelCase : List[Any] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def a_ ( _lowerCAmelCase : List[str] ):
'''simple docstring'''
for pegasus_name, hf_name in PATTERNS:
lowercase__ : Optional[Any] = k.replace(_lowerCAmelCase , _lowerCAmelCase )
return k
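# Self-contained sketch (added; `_rename_example` is a hypothetical name, and the
# rules list above is bound to `_UpperCamelCase` in this dump even though the loop
# refers to it as PATTERNS): a sequential replace over the pattern pairs, e.g.
#   _rename_example("encoder/attention/output_proj/kernel")
#   == "encoder.attn.out_proj.weight"
def _rename_example(k: str) -> str:
    for pegasus_name, hf_name in _UpperCamelCase:
        k = k.replace(pegasus_name, hf_name)
    return k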
def a_ ( _lowerCAmelCase : dict , _lowerCAmelCase : dict ):
'''simple docstring'''
lowercase__ : Optional[Any] = DEFAULTS.copy()
cfg_kwargs.update(_lowerCAmelCase )
lowercase__ : Union[str, Any] = PegasusConfig(**_lowerCAmelCase )
lowercase__ : Dict = PegasusForConditionalGeneration(_lowerCAmelCase )
lowercase__ : Optional[Any] = torch_model.model.state_dict()
lowercase__ : str = {}
for k, v in tf_weights.items():
lowercase__ : List[Any] = rename_state_dict_key(_lowerCAmelCase )
if new_k not in sd:
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
if "dense" in k or "proj" in new_k:
lowercase__ : str = v.T
lowercase__ : List[Any] = torch.tensor(_lowerCAmelCase , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
# make sure embedding.padding_idx is respected
lowercase__ : Any = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
lowercase__ : List[str] = mapping['shared.weight']
lowercase__ : Dict = mapping['shared.weight']
lowercase__ : str = {k: torch.zeros_like(_lowerCAmelCase ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
mapping.update(**_lowerCAmelCase )
lowercase__ , lowercase__ : List[Any] = torch_model.model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
lowercase__ : Optional[Any] = [
k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
]
assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], f"""no matches found for the following tf keys {extra}"""
return torch_model
def a_ ( _lowerCAmelCase : Tuple="./ckpt/aeslc/model.ckpt-32000" ):
'''simple docstring'''
lowercase__ : List[str] = tf.train.list_variables(_lowerCAmelCase )
lowercase__ : Tuple = {}
lowercase__ : List[Any] = ['Adafactor', 'global_step']
for name, shape in tqdm(_lowerCAmelCase , desc='converting tf checkpoint to dict' ):
lowercase__ : Optional[Any] = any(pat in name for pat in ignore_name )
if skip_key:
continue
lowercase__ : Dict = tf.train.load_variable(_lowerCAmelCase , _lowerCAmelCase )
lowercase__ : Optional[Any] = array
return tf_weights
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : str = Path(_lowerCAmelCase ).parent.name
lowercase__ : Optional[int] = task_specific_params[f"""summarization_{dataset}"""]['max_position_embeddings']
lowercase__ : Union[str, Any] = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=_lowerCAmelCase )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(_lowerCAmelCase )
# convert model
lowercase__ : Tuple = get_tf_weights_as_numpy(_lowerCAmelCase )
lowercase__ : List[str] = task_specific_params[f"""summarization_{dataset}"""]
if dataset == "large":
lowercase__ : str = task_specific_params
lowercase__ : Dict = convert_pegasus(_lowerCAmelCase , _lowerCAmelCase )
torch_model.save_pretrained(_lowerCAmelCase )
lowercase__ : Dict = torch_model.state_dict()
sd.pop('model.decoder.embed_positions.weight' )
sd.pop('model.encoder.embed_positions.weight' )
torch.save(_lowerCAmelCase , Path(_lowerCAmelCase ) / 'pytorch_model.bin' )
if __name__ == "__main__":
_UpperCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
_UpperCamelCase : List[Any] = parser.parse_args()
if args.save_dir is None:
_UpperCamelCase : Dict = Path(args.tf_ckpt_path).parent.name
_UpperCamelCase : Tuple = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 645
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ , lowercase__ : str = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-canny' , from_pt=a , dtype=jnp.bfloat16 )
lowercase__ , lowercase__ : List[str] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , controlnet=a , from_pt=a , dtype=jnp.bfloat16 )
lowercase__ : List[Any] = controlnet_params
lowercase__ : int = 'bird'
lowercase__ : List[Any] = jax.device_count()
lowercase__ : Dict = pipe.prepare_text_inputs([prompts] * num_samples )
lowercase__ : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
lowercase__ : Optional[int] = pipe.prepare_image_inputs([canny_image] * num_samples )
lowercase__ : List[Any] = jax.random.PRNGKey(0 )
lowercase__ : Tuple = jax.random.split(a , jax.device_count() )
lowercase__ : str = replicate(a )
lowercase__ : List[str] = shard(a )
lowercase__ : Dict = shard(a )
lowercase__ : List[Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=5_0 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
lowercase__ : Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase__ : Tuple = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowercase__ : int = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ : Optional[Any] = jnp.array(
[0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ , lowercase__ : int = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-openpose' , from_pt=a , dtype=jnp.bfloat16 )
lowercase__ , lowercase__ : Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , controlnet=a , from_pt=a , dtype=jnp.bfloat16 )
lowercase__ : Optional[Any] = controlnet_params
lowercase__ : List[Any] = 'Chef in the kitchen'
lowercase__ : List[str] = jax.device_count()
lowercase__ : Dict = pipe.prepare_text_inputs([prompts] * num_samples )
lowercase__ : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
lowercase__ : Optional[int] = pipe.prepare_image_inputs([pose_image] * num_samples )
lowercase__ : List[str] = jax.random.PRNGKey(0 )
lowercase__ : str = jax.random.split(a , jax.device_count() )
lowercase__ : Optional[Any] = replicate(a )
lowercase__ : Optional[Any] = shard(a )
lowercase__ : List[Any] = shard(a )
lowercase__ : List[Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=5_0 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
lowercase__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase__ : List[str] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowercase__ : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ : str = jnp.array(
[[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
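        # Note (added): `shard` splits the leading batch axis across local devices
        # and `replicate` broadcasts the params, matching the jit=True (pmap) code
        # path exercised above.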
| 645
| 1
|
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def a_ ( ):
'''simple docstring'''
lowercase__ : int = ArgumentParser(
description=(
'PyTorch TPU distributed training launch '
'helper utility that will spawn up '
'multiple distributed processes'
) )
# Optional arguments for the launch helper
parser.add_argument('--num_cores' , type=_lowerCAmelCase , default=1 , help='Number of TPU cores to use (1 or 8).' )
# positional
parser.add_argument(
'training_script' , type=_lowerCAmelCase , help=(
'The full path to the single TPU training '
'program/script to be launched in parallel, '
'followed by all the arguments for the '
'training script'
) , )
# rest from the training program
parser.add_argument('training_script_args' , nargs=_lowerCAmelCase )
return parser.parse_args()
def a_ ( ):
'''simple docstring'''
lowercase__ : Optional[int] = parse_args()
# Import training_script as a module.
lowercase__ : Union[str, Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
lowercase__ : Any = script_fpath.stem
lowercase__ : Tuple = importlib.import_module(_lowerCAmelCase )
# Patch sys.argv
lowercase__ : str = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
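    # Illustrative invocation (added sketch; both filenames are hypothetical):
    #   python xla_spawn.py --num_cores 8 my_training_script.py --arg1 --arg2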
| 645
|
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 645
| 1
|
"""simple docstring"""
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def a_ ( _lowerCAmelCase : List[str] ):
'''simple docstring'''
lowercase__ : str = model.config
lowercase__ : Optional[int] = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
lowercase__ : Optional[int] = MBartConfig(
is_decoder=_lowerCAmelCase , is_encoder_decoder=_lowerCAmelCase , add_cross_attention=_lowerCAmelCase , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=_lowerCAmelCase , add_final_layer_norm=_lowerCAmelCase , )
return encoder_config, decoder_config
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
if "encoder.model" in name:
lowercase__ : str = name.replace('encoder.model' , 'encoder' )
if "decoder.model" in name:
lowercase__ : Any = name.replace('decoder.model' , 'decoder' )
if "patch_embed.proj" in name:
lowercase__ : Optional[Any] = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
lowercase__ : List[str] = name.replace('patch_embed.norm' , 'embeddings.norm' )
if name.startswith('encoder' ):
if "layers" in name:
lowercase__ : str = 'encoder.' + name
if "attn.proj" in name:
lowercase__ : List[Any] = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name and "mask" not in name:
lowercase__ : str = name.replace('attn' , 'attention.self' )
if "norm1" in name:
lowercase__ : Optional[Any] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
lowercase__ : int = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
lowercase__ : List[str] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
lowercase__ : Any = name.replace('mlp.fc2' , 'output.dense' )
if name == "encoder.norm.weight":
lowercase__ : List[str] = 'encoder.layernorm.weight'
if name == "encoder.norm.bias":
lowercase__ : List[Any] = 'encoder.layernorm.bias'
return name
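# Example (added illustration): tracing the rules above, the original key
# "encoder.model.layers.0.blocks.0.attn.proj.weight" becomes
# "encoder.encoder.layers.0.blocks.0.attention.output.dense.weight"
# ("encoder.model" -> "encoder", the "encoder." prefix re-added for layer keys,
# then "attn.proj" -> "attention.output.dense").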
def a_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowercase__ : Dict = orig_state_dict.pop(_lowerCAmelCase )
if "qkv" in key:
lowercase__ : str = key.split('.' )
lowercase__ : int = int(key_split[3] )
lowercase__ : Tuple = int(key_split[5] )
lowercase__ : Any = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowercase__ : Optional[Any] = val[:dim, :]
lowercase__ : List[Any] = val[dim : dim * 2, :]
lowercase__ : Tuple = val[-dim:, :]
else:
lowercase__ : Union[str, Any] = val[:dim]
lowercase__ : List[str] = val[dim : dim * 2]
lowercase__ : Tuple = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
lowercase__ : List[Any] = val
return orig_state_dict
def a_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : List[Any]=False ):
'''simple docstring'''
lowercase__ : Optional[Any] = DonutModel.from_pretrained(_lowerCAmelCase ).eval()
# load HuggingFace model
lowercase__ , lowercase__ : Dict = get_configs(_lowerCAmelCase )
lowercase__ : Dict = DonutSwinModel(_lowerCAmelCase )
lowercase__ : str = MBartForCausalLM(_lowerCAmelCase )
lowercase__ : Union[str, Any] = VisionEncoderDecoderModel(encoder=_lowerCAmelCase , decoder=_lowerCAmelCase )
model.eval()
lowercase__ : int = original_model.state_dict()
lowercase__ : Optional[int] = convert_state_dict(_lowerCAmelCase , _lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
# verify results on scanned document
lowercase__ : Union[str, Any] = load_dataset('hf-internal-testing/example-documents' )
lowercase__ : str = dataset['test'][0]['image'].convert('RGB' )
lowercase__ : List[Any] = XLMRobertaTokenizerFast.from_pretrained(_lowerCAmelCase , from_slow=_lowerCAmelCase )
lowercase__ : Any = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
lowercase__ : List[str] = DonutProcessor(_lowerCAmelCase , _lowerCAmelCase )
lowercase__ : int = processor(_lowerCAmelCase , return_tensors='pt' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
lowercase__ : Dict = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
lowercase__ : Tuple = 'When is the coffee break?'
lowercase__ : str = task_prompt.replace('{user_input}' , _lowerCAmelCase )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
lowercase__ : Union[str, Any] = '<s_rvlcdip>'
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
lowercase__ : List[Any] = '<s_cord>'
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        lowercase__ : Optional[int] = '<s_cord-v2>'
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
lowercase__ : Optional[int] = '<s_zhtrainticket>'
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
lowercase__ : str = 'hello world'
else:
raise ValueError('Model name not supported' )
lowercase__ : str = original_model.decoder.tokenizer(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , return_tensors='pt' )[
'input_ids'
]
lowercase__ : Optional[int] = original_model.encoder.model.patch_embed(_lowerCAmelCase )
lowercase__ , lowercase__ : str = model.encoder.embeddings(_lowerCAmelCase )
assert torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 )
# verify encoder hidden states
lowercase__ : Union[str, Any] = original_model.encoder(_lowerCAmelCase )
lowercase__ : Optional[Any] = model.encoder(_lowerCAmelCase ).last_hidden_state
assert torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-2 )
# verify decoder hidden states
lowercase__ : Tuple = original_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).logits
lowercase__ : str = model(_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase ).logits
assert torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCAmelCase )
processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
model.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
processor.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
if __name__ == "__main__":
_UpperCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="naver-clova-ix/donut-base-finetuned-docvqa",
required=False,
type=str,
help="Name of the original model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
required=False,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub.",
)
_UpperCamelCase : int = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
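    # Illustrative invocation (added sketch; the script filename is hypothetical,
    # the flags are the ones registered above):
    #   python convert_donut_to_pytorch.py --model_name naver-clova-ix/donut-base \
    #       --pytorch_dump_folder_path ./donut-base-hf --push_to_hub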
| 645
|
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Optional[Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
lowercase__ : Union[str, Any] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
model.to(a )
from datasets import load_dataset
lowercase__ : str = load_dataset('nielsr/rvlcdip-demo' )
lowercase__ : Tuple = dataset['train'][0]['image'].convert('RGB' )
lowercase__ : int = image_processor(a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : List[str] = model(**a )
lowercase__ : List[Any] = outputs.logits
lowercase__ : Union[str, Any] = torch.Size((1, 1_6) )
self.assertEqual(logits.shape , a )
lowercase__ : Tuple = torch.tensor(
[-0.4_158, -0.4_092, -0.4_347] , device=a , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , a , atol=1e-4 ) )
| 645
| 1
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class UpperCAmelCase_ ( _a):
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=False , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Any:
lowercase__ : Tuple = parent
lowercase__ : List[Any] = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : List[Any] = is_training
lowercase__ : Optional[Any] = use_input_mask
lowercase__ : Optional[int] = use_token_type_ids
lowercase__ : int = use_labels
lowercase__ : Tuple = vocab_size
lowercase__ : int = hidden_size
lowercase__ : Any = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : Optional[Any] = intermediate_size
lowercase__ : Optional[Any] = hidden_act
lowercase__ : List[str] = hidden_dropout_prob
lowercase__ : List[Any] = attention_probs_dropout_prob
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : List[str] = type_vocab_size
lowercase__ : Tuple = type_sequence_label_size
lowercase__ : List[Any] = initializer_range
lowercase__ : str = num_labels
lowercase__ : Tuple = num_choices
lowercase__ : str = scope
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = None
if self.use_input_mask:
lowercase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Dict = None
lowercase__ : Optional[Any] = None
lowercase__ : int = None
if self.use_labels:
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : List[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self ) -> Optional[int]:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
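        # Note (added): DistilBERT's config uses dim / n_layers / n_heads /
        # hidden_dim rather than BERT-style hidden_size / num_hidden_layers,
        # hence the remapping above.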
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict:
lowercase__ : Tuple = DistilBertModel(config=a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , a )
lowercase__ : str = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict:
lowercase__ : Optional[int] = DistilBertForMaskedLM(config=a )
model.to(a )
model.eval()
lowercase__ : Union[str, Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> int:
lowercase__ : Tuple = DistilBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
lowercase__ : Tuple = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> List[str]:
lowercase__ : int = self.num_labels
lowercase__ : Dict = DistilBertForSequenceClassification(a )
model.to(a )
model.eval()
lowercase__ : Optional[Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Any:
lowercase__ : Any = self.num_labels
lowercase__ : List[str] = DistilBertForTokenClassification(config=a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Tuple:
lowercase__ : List[Any] = self.num_choices
lowercase__ : Any = DistilBertForMultipleChoice(config=a )
model.to(a )
model.eval()
lowercase__ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : int = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Union[str, Any] = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__)) : List[str] = config_and_inputs
lowercase__ : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : List[str] = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__ : str = (
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : Any = True
lowerCamelCase__ : List[Any] = True
lowerCamelCase__ : Optional[Any] = True
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : str = DistilBertModelTester(self )
lowercase__ : int = ConfigTester(self , config_class=a , dim=3_7 )
def _UpperCAmelCase ( self ) -> Dict:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = DistilBertModel.from_pretrained(a )
self.assertIsNotNone(a )
@slow
@require_torch_gpu
def _UpperCAmelCase ( self ) -> Any:
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
lowercase__ : Optional[int] = True
lowercase__ : Union[str, Any] = model_class(config=a )
lowercase__ : int = self._prepare_for_class(a , a )
lowercase__ : Tuple = torch.jit.trace(
a , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , 'traced_model.pt' ) )
lowercase__ : Optional[int] = torch.jit.load(os.path.join(a , 'traced_model.pt' ) , map_location=a )
loaded(inputs_dict['input_ids'].to(a ) , inputs_dict['attention_mask'].to(a ) )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : int = DistilBertModel.from_pretrained('distilbert-base-uncased' )
lowercase__ : Union[str, Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowercase__ : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase__ : Optional[Any] = model(a , attention_mask=a )[0]
lowercase__ : Tuple = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , a )
lowercase__ : List[Any] = torch.tensor(
[[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
| 645
|
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase_ :
@staticmethod
def _UpperCAmelCase ( *a , **a ) -> int:
pass
def a_ ( _lowerCAmelCase : Image ):
'''simple docstring'''
    m = hashlib.md5(_lowerCAmelCase.tobytes() )  # md5 (mangled to "mda" in the dump); param name follows the signature above
    return m.hexdigest()
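# Usage sketch (added; the call sites below refer to this helper as `hashimage`):
# any PIL image hashes to a stable 32-character hex digest, e.g.
#   hashimage(Image.new("RGB", (2, 2)))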
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ : Union[str, Any] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def _UpperCAmelCase ( self , a , a , a ) -> Dict:
lowercase__ : Union[str, Any] = DepthEstimationPipeline(model=a , image_processor=a )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _UpperCAmelCase ( self , a , a ) -> Optional[int]:
lowercase__ : Tuple = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , a )
import datasets
lowercase__ : Tuple = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
lowercase__ : List[Any] = depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
] , a , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@slow
@require_torch
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Tuple = 'Intel/dpt-large'
lowercase__ : Optional[int] = pipeline('depth-estimation' , model=a )
lowercase__ : List[Any] = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
lowercase__ : Optional[Any] = hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )
@require_torch
def _UpperCAmelCase ( self ) -> Optional[int]:
# This is highly irregular to have no small tests.
self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
| 645
| 1
|
"""simple docstring"""
_UpperCamelCase : Optional[int] = [
(10_00, "M"),
(9_00, "CM"),
(5_00, "D"),
(4_00, "CD"),
(1_00, "C"),
(90, "XC"),
(50, "L"),
(40, "XL"),
(10, "X"),
(9, "IX"),
(5, "V"),
(4, "IV"),
(1, "I"),
]
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : Union[str, Any] = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
lowercase__ : int = 0
lowercase__ : Optional[int] = 0
while place < len(_lowerCAmelCase ):
if (place + 1 < len(_lowerCAmelCase )) and (vals[roman[place]] < vals[roman[place + 1]]):
total += vals[roman[place + 1]] - vals[roman[place]]
place += 2
else:
total += vals[roman[place]]
place += 1
return total
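# Example (added illustration): "XIV" -> 10 + (5 - 1) = 14; the lookahead above
# handles the subtractive "IV" pair in a single step.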
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : int = []
for arabic, roman in ROMAN:
((lowercase__) , (lowercase__)) : str = divmod(_lowerCAmelCase , _lowerCAmelCase )
result.append(roman * factor )
if number == 0:
break
return "".join(_lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
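    # Self-contained sketch (added; `_int_to_roman` is a hypothetical name, and the
    # value table above is bound to `_UpperCamelCase` in this dump even though the
    # loop calls it ROMAN): greedy divmod gives 1994 -> "MCMXCIV".
    def _int_to_roman(number: int) -> str:
        parts = []
        for value, symbol in _UpperCamelCase:
            factor, number = divmod(number, value)
            parts.append(symbol * factor)
            if number == 0:
                break
        return "".join(parts)

    assert _int_to_roman(1994) == "MCMXCIV"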
| 645
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class UpperCAmelCase_ ( _a):
def __init__( self ) -> Any:
lowercase__ : Tuple = []
def _UpperCAmelCase ( self , a , a , a , **a ) -> Any:
self.events.append('on_init_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Optional[int]:
self.events.append('on_train_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> List[str]:
self.events.append('on_train_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_epoch_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Optional[Any]:
self.events.append('on_epoch_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_step_begin' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> str:
self.events.append('on_step_end' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> int:
self.events.append('on_evaluate' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Tuple:
self.events.append('on_predict' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Union[str, Any]:
self.events.append('on_save' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> List[str]:
self.events.append('on_log' )
def _UpperCAmelCase ( self , a , a , a , **a ) -> Any:
self.events.append('on_prediction_step' )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> str:
lowercase__ : str = tempfile.mkdtemp()
def _UpperCAmelCase ( self ) -> Dict:
shutil.rmtree(self.output_dir )
def _UpperCAmelCase ( self , a=0 , a=0 , a=6_4 , a=6_4 , a=None , a=False , **a ) -> int:
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# its set to False since the tests later on depend on its value.
lowercase__ : str = RegressionDataset(length=a )
lowercase__ : Any = RegressionDataset(length=a )
lowercase__ : Optional[Any] = RegressionModelConfig(a=a , b=a )
lowercase__ : Union[str, Any] = RegressionPreTrainedModel(a )
lowercase__ : Tuple = TrainingArguments(self.output_dir , disable_tqdm=a , report_to=[] , **a )
return Trainer(
a , a , train_dataset=a , eval_dataset=a , callbacks=a , )
    def _UpperCAmelCase ( self , cbs_a , cbs_b ) -> Union[str, Any]:
        self.assertEqual(len(cbs_a ) , len(cbs_b ) )
        # Order doesn't matter
        cbs_a = sorted(cbs_a , key=lambda cb : cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
        cbs_b = sorted(cbs_b , key=lambda cb : cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
        for cba, cbb in zip(cbs_a , cbs_b ):
            if isinstance(cba , type ) and isinstance(cbb , type ):
                self.assertEqual(cba , cbb )
            elif isinstance(cba , type ) and not isinstance(cbb , type ):
                self.assertEqual(cba , cbb.__class__ )
            elif not isinstance(cba , type ) and isinstance(cbb , type ):
                self.assertEqual(cba.__class__ , cbb )
            else:
                self.assertEqual(cba , cbb )
def _UpperCAmelCase ( self , a ) -> Optional[Any]:
lowercase__ : Dict = ['on_init_end', 'on_train_begin']
lowercase__ : List[Any] = 0
lowercase__ : Optional[int] = len(trainer.get_eval_dataloader() )
lowercase__ : Tuple = ['on_prediction_step'] * len(trainer.get_eval_dataloader() ) + ['on_log', 'on_evaluate']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('on_epoch_begin' )
for _ in range(a ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('on_log' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('on_save' )
expected_events.append('on_epoch_end' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : int = self.get_trainer()
lowercase__ : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# Callbacks passed at init are added to the default callbacks
lowercase__ : str = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(a )
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
lowercase__ : List[Any] = self.get_trainer(disable_tqdm=a )
lowercase__ : Optional[Any] = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , a )
    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action='ignore', category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy='steps')
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy='epoch')
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy='steps',
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch('transformers.trainer_callback.logger.warning') as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
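
# The tests above rely on a `MyTestTrainerCallback` that is not part of this excerpt.
# In the upstream test module it is a `TrainerCallback` that records every event it
# sees; the following is a minimal sketch of such a callback (the exact upstream
# implementation is assumed, not copied). In a real module it would sit above the
# test class.
class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers the events that it sees (minimal sketch)."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append('on_init_end')

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append('on_train_begin')

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append('on_train_end')

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append('on_epoch_begin')

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append('on_epoch_end')

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append('on_step_begin')

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append('on_step_end')

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append('on_evaluate')

    def on_save(self, args, state, control, **kwargs):
        self.events.append('on_save')

    def on_log(self, args, state, control, **kwargs):
        self.events.append('on_log')

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append('on_prediction_step')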
| 645
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Optional[Any] = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : List[Any] = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_albert"] = [
"ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"AlbertForMaskedLM",
"AlbertForMultipleChoice",
"AlbertForPreTraining",
"AlbertForQuestionAnswering",
"AlbertForSequenceClassification",
"AlbertForTokenClassification",
"AlbertModel",
"AlbertPreTrainedModel",
"load_tf_weights_in_albert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_albert"] = [
"TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAlbertForMaskedLM",
"TFAlbertForMultipleChoice",
"TFAlbertForPreTraining",
"TFAlbertForQuestionAnswering",
"TFAlbertForSequenceClassification",
"TFAlbertForTokenClassification",
"TFAlbertMainLayer",
"TFAlbertModel",
"TFAlbertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_albert"] = [
"FlaxAlbertForMaskedLM",
"FlaxAlbertForMultipleChoice",
"FlaxAlbertForPreTraining",
"FlaxAlbertForQuestionAnswering",
"FlaxAlbertForSequenceClassification",
"FlaxAlbertForTokenClassification",
"FlaxAlbertModel",
"FlaxAlbertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
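
# The `_LazyModule` indirection above keeps heavy backends out of the import path:
# the module body only builds `_import_structure`, and submodules such as
# `modeling_albert` are imported the first time one of their exported names is
# accessed. A small illustration of the effect (assuming a standard `transformers`
# install; nothing torch/TF/Flax-specific is loaded until it is touched):
if __name__ == '__main__':
    import transformers.models.albert as albert

    # This first attribute access triggers the deferred import of
    # `configuration_albert` only.
    config = albert.AlbertConfig()
    print(config.hidden_size)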
| 645
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 645
| 1
|
"""simple docstring"""
def partition(m: int) -> int:
    """Count the integer partitions of ``m`` with bottom-up dynamic programming."""
    # memo[n][k] counts the partitions of n into parts of size at most k + 1
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                # use one part of size k + 1 and partition the remainder
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
except ValueError:
print("Please enter a number.")
else:
try:
            n = int(sys.argv[1])
            print(partition(n))
except ValueError:
print("Please pass a number.")
| 645
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['bs'], model_result['ss']):
                result = model_result['result'][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            eager_mode=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = 'sgugger/tiny-distilbert-classification'
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            eager_mode=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = 'patrickvonplaten/t5-tiny-random'
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU')) == 0, 'Cannot do xla on CPU.')
    def test_inference_no_configs_xla(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            use_xla=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, 'inf_time.csv'),
                inference_memory_csv_file=os.path.join(tmp_dir, 'inf_mem.csv'),
                env_info_csv_file=os.path.join(tmp_dir, 'env.csv'),
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, 'inf_time.csv')).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, 'inf_mem.csv')).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, 'env.csv')).exists())

    def test_trace_memory(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, 'sequential'))
            self.assertTrue(hasattr(summary, 'cumulative'))
            self.assertTrue(hasattr(summary, 'current'))
            self.assertTrue(hasattr(summary, 'total'))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, 'log.txt'),
                log_print=True,
                trace_memory_line_by_line=True,
                eager_mode=True,
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, 'log.txt')).exists())
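
# Outside of a test suite, the same benchmark utilities can be driven directly.
# A minimal sketch (assuming TensorFlow and the tiny public checkpoint used by
# the tests are available):
if __name__ == '__main__':
    args = TensorFlowBenchmarkArguments(
        models=['sshleifer/tiny-gpt2'],
        inference=True,
        sequence_lengths=[8, 32],
        batch_sizes=[1],
        multi_process=False,
    )
    results = TensorFlowBenchmark(args).run()
    print(results.time_inference_result)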
| 645
| 1
|
"""simple docstring"""
_UpperCamelCase : str = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def a_ ( ):
'''simple docstring'''
lowercase__ : Any = input('Enter message: ' )
lowercase__ : Optional[int] = input('Enter key [alphanumeric]: ' )
lowercase__ : str = input('Encrypt/Decrypt [e/d]: ' )
if mode.lower().startswith('e' ):
lowercase__ : str = 'encrypt'
lowercase__ : Union[str, Any] = encrypt_message(_lowerCAmelCase , _lowerCAmelCase )
elif mode.lower().startswith('d' ):
lowercase__ : Optional[int] = 'decrypt'
lowercase__ : Union[str, Any] = decrypt_message(_lowerCAmelCase , _lowerCAmelCase )
print(f"""\n{mode.title()}ed message:""" )
print(_lowerCAmelCase )
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : str ):
'''simple docstring'''
return translate_message(_lowerCAmelCase , _lowerCAmelCase , 'encrypt' )
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : str ):
'''simple docstring'''
return translate_message(_lowerCAmelCase , _lowerCAmelCase , 'decrypt' )
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : str = []
lowercase__ : Optional[int] = 0
lowercase__ : Any = key.upper()
for symbol in message:
lowercase__ : int = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(_lowerCAmelCase )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(_lowerCAmelCase ):
lowercase__ : Optional[Any] = 0
else:
translated.append(_lowerCAmelCase )
return "".join(_lowerCAmelCase )
if __name__ == "__main__":
main()
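
# A quick round trip through the reconstructed functions, using the classic
# LEMON example key. Case and spaces are preserved.
if __name__ == "__main__":
    ciphertext = encrypt_message('LEMON', 'Attack at dawn')
    print(ciphertext)                                   # 'Lxfopv ef rnhr'
    assert decrypt_message('LEMON', ciphertext) == 'Attack at dawn'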
| 645
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester:
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=False , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Any:
lowercase__ : Tuple = parent
lowercase__ : List[Any] = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : List[Any] = is_training
lowercase__ : Optional[Any] = use_input_mask
lowercase__ : Optional[int] = use_token_type_ids
lowercase__ : int = use_labels
lowercase__ : Tuple = vocab_size
lowercase__ : int = hidden_size
lowercase__ : Any = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : Optional[Any] = intermediate_size
lowercase__ : Optional[Any] = hidden_act
lowercase__ : List[str] = hidden_dropout_prob
lowercase__ : List[Any] = attention_probs_dropout_prob
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : List[str] = type_vocab_size
lowercase__ : Tuple = type_sequence_label_size
lowercase__ : List[Any] = initializer_range
lowercase__ : str = num_labels
lowercase__ : Tuple = num_choices
lowercase__ : str = scope
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = None
if self.use_input_mask:
lowercase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Dict = None
lowercase__ : Optional[Any] = None
lowercase__ : int = None
if self.use_labels:
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : List[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self ) -> Optional[int]:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict:
lowercase__ : Tuple = DistilBertModel(config=a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , a )
lowercase__ : str = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict:
lowercase__ : Optional[int] = DistilBertForMaskedLM(config=a )
model.to(a )
model.eval()
lowercase__ : Union[str, Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> int:
lowercase__ : Tuple = DistilBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
lowercase__ : Tuple = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> List[str]:
lowercase__ : int = self.num_labels
lowercase__ : Dict = DistilBertForSequenceClassification(a )
model.to(a )
model.eval()
lowercase__ : Optional[Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Any:
lowercase__ : Any = self.num_labels
lowercase__ : List[str] = DistilBertForTokenClassification(config=a )
model.to(a )
model.eval()
lowercase__ : Any = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Tuple:
lowercase__ : List[Any] = self.num_choices
lowercase__ : Any = DistilBertForMultipleChoice(config=a )
model.to(a )
model.eval()
lowercase__ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : int = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Union[str, Any] = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__)) : List[str] = config_and_inputs
lowercase__ : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
lowerCamelCase__ : List[str] = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__ : str = (
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : Any = True
lowerCamelCase__ : List[Any] = True
lowerCamelCase__ : Optional[Any] = True
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : str = DistilBertModelTester(self )
lowercase__ : int = ConfigTester(self , config_class=a , dim=3_7 )
def _UpperCAmelCase ( self ) -> Dict:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = DistilBertModel.from_pretrained(a )
self.assertIsNotNone(a )
@slow
@require_torch_gpu
def _UpperCAmelCase ( self ) -> Any:
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
lowercase__ : Optional[int] = True
lowercase__ : Union[str, Any] = model_class(config=a )
lowercase__ : int = self._prepare_for_class(a , a )
lowercase__ : Tuple = torch.jit.trace(
a , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , 'traced_model.pt' ) )
lowercase__ : Optional[int] = torch.jit.load(os.path.join(a , 'traced_model.pt' ) , map_location=a )
loaded(inputs_dict['input_ids'].to(a ) , inputs_dict['attention_mask'].to(a ) )
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : int = DistilBertModel.from_pretrained('distilbert-base-uncased' )
lowercase__ : Union[str, Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowercase__ : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase__ : Optional[Any] = model(a , attention_mask=a )[0]
lowercase__ : Tuple = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , a )
lowercase__ : List[Any] = torch.tensor(
[[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
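
# For readers who just want to exercise the checkpoint the way the integration
# test does, a minimal fill-mask sketch (assuming a standard `transformers`
# install; the predicted token is illustrative, not guaranteed):
if __name__ == '__main__':
    from transformers import pipeline

    unmasker = pipeline('fill-mask', model='distilbert-base-uncased')
    predictions = unmasker('Hello, my dog is [MASK].')
    print(predictions[0]['token_str'], predictions[0]['score'])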
| 645
| 1
|
"""simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    """Yield successive ``size``-length tuples from ``seq``."""
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Upper-case the message, drop non-letters and pad repeated letters with X."""
    dirty = ''.join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ''
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean


def generate_table(key: str) -> list[str]:
    # I and J share a cell so that the table fits in 5x5 (25 letters)
    alphabet = 'ABCDEFGHIKLMNOPQRSTUVWXYZ'
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ''
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ''
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
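
# A quick round trip with the reconstructed functions; the key and plaintext come
# from the classic Playfair example. Note that the 25-letter table merges I and J,
# so messages containing a literal 'J' are not supported by this sketch.
if __name__ == '__main__':
    plaintext = 'Hide the gold in the tree stump'
    key = 'playfair example'
    ciphertext = encode(plaintext, key)
    print(ciphertext)
    assert decode(ciphertext, key) == prepare_input(plaintext)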
| 645
|
"""simple docstring"""
from __future__ import annotations
def shear_stress(
    stress: float,
    tangential_force: float,
    area: float,
) -> tuple[str, float]:
    """
    Solve for the quantity passed in as zero among shear stress, tangential
    force and cross-sectional area, given the other two (tau = F / A).

    >>> shear_stress(stress=25, tangential_force=100, area=0)
    ('area', 4.0)
    >>> shear_stress(stress=0, tangential_force=1600, area=200)
    ('stress', 8.0)
    >>> shear_stress(stress=1000, tangential_force=0, area=1200)
    ('tangential_force', 1200000)
    """
    if (stress, tangential_force, area).count(0) != 1:
        raise ValueError('You cannot supply more or less than 2 values')
    elif stress < 0:
        raise ValueError('Stress cannot be negative')
    elif tangential_force < 0:
        raise ValueError('Tangential Force cannot be negative')
    elif area < 0:
        raise ValueError('Area cannot be negative')
    elif stress == 0:
        return (
            'stress',
            tangential_force / area,
        )
    elif tangential_force == 0:
        return (
            'tangential_force',
            stress * area,
        )
    else:
        return (
            'area',
            tangential_force / stress,
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 645
| 1
|
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = tempfile.mkdtemp()
lowercase__ : Tuple = SamImageProcessor()
lowercase__ : int = SamProcessor(a )
processor.save_pretrained(self.tmpdirname )
def _UpperCAmelCase ( self , **a ) -> Union[str, Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **a ).image_processor
def _UpperCAmelCase ( self ) -> str:
shutil.rmtree(self.tmpdirname )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : List[Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
lowercase__ : str = [Image.fromarray(np.moveaxis(a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : List[Any] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase__ : Any = self.get_image_processor(do_normalize=a , padding_value=1.0 )
lowercase__ : Optional[int] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=a , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : List[Any] = self.get_image_processor()
lowercase__ : Any = SamProcessor(image_processor=a )
lowercase__ : Dict = self.prepare_image_inputs()
lowercase__ : str = image_processor(a , return_tensors='np' )
lowercase__ : Tuple = processor(images=a , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Dict = self.get_image_processor()
lowercase__ : Dict = SamProcessor(image_processor=a )
lowercase__ : int = [torch.ones((1, 3, 5, 5) )]
lowercase__ : Optional[int] = [[1_7_6_4, 2_6_4_6]]
lowercase__ : Any = [[6_8_3, 1_0_2_4]]
lowercase__ : Union[str, Any] = processor.post_process_masks(a , a , a )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
lowercase__ : Tuple = processor.post_process_masks(
a , torch.tensor(a ) , torch.tensor(a ) )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
# should also work with np
lowercase__ : Optional[Any] = [np.ones((1, 3, 5, 5) )]
lowercase__ : str = processor.post_process_masks(a , np.array(a ) , np.array(a ) )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
lowercase__ : List[Any] = [[1, 0], [0, 1]]
with self.assertRaises(a ):
lowercase__ : Union[str, Any] = processor.post_process_masks(a , np.array(a ) , np.array(a ) )
@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : List[Any] = tempfile.mkdtemp()
lowercase__ : Union[str, Any] = SamImageProcessor()
lowercase__ : Tuple = SamProcessor(a )
processor.save_pretrained(self.tmpdirname )
def _UpperCAmelCase ( self , **a ) -> List[str]:
return AutoProcessor.from_pretrained(self.tmpdirname , **a ).image_processor
def _UpperCAmelCase ( self ) -> Tuple:
shutil.rmtree(self.tmpdirname )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : int = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
lowercase__ : Optional[Any] = [Image.fromarray(np.moveaxis(a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : List[str] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase__ : Tuple = self.get_image_processor(do_normalize=a , padding_value=1.0 )
lowercase__ : Any = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=a , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : int = self.get_image_processor()
lowercase__ : str = SamProcessor(image_processor=a )
lowercase__ : Union[str, Any] = self.prepare_image_inputs()
lowercase__ : Tuple = image_processor(a , return_tensors='np' )
lowercase__ : int = processor(images=a , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : List[Any] = self.get_image_processor()
lowercase__ : Any = SamProcessor(image_processor=a )
lowercase__ : List[Any] = [tf.ones((1, 3, 5, 5) )]
lowercase__ : Tuple = [[1_7_6_4, 2_6_4_6]]
lowercase__ : str = [[6_8_3, 1_0_2_4]]
lowercase__ : Dict = processor.post_process_masks(a , a , a , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
lowercase__ : Tuple = processor.post_process_masks(
a , tf.convert_to_tensor(a ) , tf.convert_to_tensor(a ) , return_tensors='tf' , )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
# should also work with np
lowercase__ : Tuple = [np.ones((1, 3, 5, 5) )]
lowercase__ : int = processor.post_process_masks(
a , np.array(a ) , np.array(a ) , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
lowercase__ : List[str] = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
lowercase__ : int = processor.post_process_masks(
a , np.array(a ) , np.array(a ) , return_tensors='tf' )
@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : List[Any] = tempfile.mkdtemp()
lowercase__ : Optional[int] = SamImageProcessor()
lowercase__ : int = SamProcessor(a )
processor.save_pretrained(self.tmpdirname )
def _UpperCAmelCase ( self , **a ) -> int:
return AutoProcessor.from_pretrained(self.tmpdirname , **a ).image_processor
def _UpperCAmelCase ( self ) -> str:
shutil.rmtree(self.tmpdirname )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Optional[Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
lowercase__ : Dict = [Image.fromarray(np.moveaxis(a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[Any] = self.get_image_processor()
lowercase__ : List[str] = SamProcessor(image_processor=a )
lowercase__ : int = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
lowercase__ : List[Any] = [tf.convert_to_tensor(a )]
lowercase__ : List[str] = [torch.tensor(a )]
lowercase__ : List[Any] = [[1_7_6_4, 2_6_4_6]]
lowercase__ : Union[str, Any] = [[6_8_3, 1_0_2_4]]
lowercase__ : Union[str, Any] = processor.post_process_masks(
a , a , a , return_tensors='tf' )
lowercase__ : Optional[Any] = processor.post_process_masks(
a , a , a , return_tensors='pt' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : str = self.get_image_processor()
lowercase__ : List[Any] = SamProcessor(image_processor=a )
lowercase__ : str = self.prepare_image_inputs()
lowercase__ : str = image_processor(a , return_tensors='pt' )['pixel_values'].numpy()
lowercase__ : int = processor(images=a , return_tensors='pt' )['pixel_values'].numpy()
lowercase__ : str = image_processor(a , return_tensors='tf' )['pixel_values'].numpy()
lowercase__ : Tuple = processor(images=a , return_tensors='tf' )['pixel_values'].numpy()
self.assertTrue(np.allclose(a , a ) )
self.assertTrue(np.allclose(a , a ) )
self.assertTrue(np.allclose(a , a ) )
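
# The processor tested above is normally used together with a SAM checkpoint.
# A hedged end-to-end sketch (the checkpoint name and the dummy image are only
# illustrations; `post_process_masks` resizes predicted masks back to the
# original image size, as exercised in the tests):
if __name__ == '__main__':
    import torch
    from transformers import SamModel

    processor = SamProcessor.from_pretrained('facebook/sam-vit-base')
    model = SamModel.from_pretrained('facebook/sam-vit-base')

    raw_image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))  # stand-in image
    inputs = processor(images=raw_image, input_points=[[[320, 240]]], return_tensors='pt')
    with torch.no_grad():
        outputs = model(**inputs)
    masks = processor.post_process_masks(
        outputs.pred_masks, inputs['original_sizes'], inputs['reshaped_input_sizes']
    )
    print(masks[0].shape)  # (1, num_masks, 480, 640)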
| 645
|
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
def __init__( self , a , a=1_3 , a=[3_0, 3_0] , a=2 , a=3 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=3 , a=None , a=8 , a=1_0 , ) -> Any:
lowercase__ : List[str] = parent
lowercase__ : Optional[Any] = batch_size
lowercase__ : Optional[int] = image_size
lowercase__ : List[Any] = patch_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : str = is_training
lowercase__ : Optional[Any] = use_labels
lowercase__ : Optional[Any] = hidden_size
lowercase__ : Dict = num_hidden_layers
lowercase__ : Optional[Any] = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : List[Any] = hidden_act
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : Any = attention_probs_dropout_prob
lowercase__ : Any = type_sequence_label_size
lowercase__ : Dict = initializer_range
lowercase__ : Union[str, Any] = num_labels
lowercase__ : Tuple = scope
lowercase__ : Tuple = n_targets
lowercase__ : Optional[int] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
lowercase__ : Optional[Any] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
lowercase__ : Tuple = num_patches + 1 + self.num_detection_tokens
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowercase__ : Tuple = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowercase__ : int = []
for i in range(self.batch_size ):
lowercase__ : Optional[Any] = {}
lowercase__ : Any = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=a )
lowercase__ : List[str] = torch.rand(self.n_targets , 4 , device=a )
labels.append(a )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> List[Any]:
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def _UpperCAmelCase ( self , a , a , a ) -> int:
lowercase__ : List[str] = YolosModel(config=a )
model.to(a )
model.eval()
lowercase__ : List[Any] = model(a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a ) -> Union[str, Any]:
lowercase__ : str = YolosForObjectDetection(a )
model.to(a )
model.eval()
lowercase__ : Dict = model(pixel_values=a )
lowercase__ : Tuple = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
lowercase__ : str = model(pixel_values=a , labels=a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : int = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Any = config_and_inputs
lowercase__ : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
lowerCamelCase__ : Optional[int] = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCamelCase__ : List[str] = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
lowerCamelCase__ : List[Any] = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : Union[str, Any] = False
def _UpperCAmelCase ( self , a , a , a=False ) -> Dict:
lowercase__ : List[str] = super()._prepare_for_class(a , a , return_labels=a )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
lowercase__ : Optional[Any] = []
for i in range(self.model_tester.batch_size ):
lowercase__ : Dict = {}
lowercase__ : Dict = torch.ones(
size=(self.model_tester.n_targets,) , device=a , dtype=torch.long )
lowercase__ : Optional[Any] = torch.ones(
self.model_tester.n_targets , 4 , device=a , dtype=torch.float )
labels.append(a )
lowercase__ : Union[str, Any] = labels
return inputs_dict
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Dict = YolosModelTester(self )
lowercase__ : Optional[int] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 )
def _UpperCAmelCase ( self ) -> str:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
# YOLOS does not use inputs_embeds
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : int = model_class(a )
lowercase__ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Tuple = [*signature.parameters.keys()]
lowercase__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Dict = True
# in YOLOS, the seq_len is different
lowercase__ : Tuple = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = True
lowercase__ : str = False
lowercase__ : str = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Any = model(**self._prepare_for_class(a , a ) )
lowercase__ : str = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : Optional[int] = True
lowercase__ : List[Any] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : List[str] = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowercase__ : Dict = len(a )
# Check attention is always last and order is fine
lowercase__ : Any = True
lowercase__ : int = True
lowercase__ : int = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Any = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[Any] = 1
self.assertEqual(out_len + added_hidden_states , len(a ) )
lowercase__ : Tuple = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _UpperCAmelCase ( self ) -> List[str]:
def check_hidden_states_output(a , a , a ):
lowercase__ : str = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : int = model(**self._prepare_for_class(a , a ) )
lowercase__ : int = outputs.hidden_states
lowercase__ : Any = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(a ) , a )
# YOLOS has a different seq_length
lowercase__ : Optional[int] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Any = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : List[Any] = True
check_hidden_states_output(a , a , a )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*a )
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : int = YolosModel.from_pretrained(a )
self.assertIsNotNone(a )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
@cached_property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return AutoImageProcessor.from_pretrained('hustvl/yolos-small' ) if is_vision_available() else None
@slow
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = YolosForObjectDetection.from_pretrained('hustvl/yolos-small' ).to(a )
lowercase__ : Tuple = self.default_image_processor
lowercase__ : Optional[int] = prepare_img()
lowercase__ : int = image_processor(images=a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : int = model(inputs.pixel_values )
# verify outputs
lowercase__ : Tuple = torch.Size((1, 1_0_0, 9_2) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ : Any = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] , device=a , )
lowercase__ : List[str] = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] , device=a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , a , atol=1e-4 ) )
# verify postprocessing
lowercase__ : Optional[Any] = image_processor.post_process_object_detection(
a , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
lowercase__ : str = torch.tensor([0.9_994, 0.9_790, 0.9_964, 0.9_972, 0.9_861] ).to(a )
lowercase__ : Any = [7_5, 7_5, 1_7, 6_3, 1_7]
lowercase__ : Optional[int] = torch.tensor([335.0_609, 79.3_848, 375.4_216, 187.2_495] ).to(a )
self.assertEqual(len(results['scores'] ) , 5 )
self.assertTrue(torch.allclose(results['scores'] , a , atol=1e-4 ) )
self.assertSequenceEqual(results['labels'].tolist() , a )
self.assertTrue(torch.allclose(results['boxes'][0, :] , a ) )
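
# The same checkpoint is easiest to try through the object-detection pipeline.
# A minimal sketch (the image path is a placeholder; results depend on the input):
if __name__ == '__main__':
    from transformers import pipeline

    detector = pipeline('object-detection', model='hustvl/yolos-small')
    detections = detector('path/to/your_image.jpg', threshold=0.9)
    for det in detections:
        print(det['label'], round(det['score'], 3), det['box'])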
| 645
| 1
|
"""simple docstring"""
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
_UpperCamelCase : Optional[Any] = TypeVar("T")
class GraphAdjacencyList(Generic[T]):
    """Adjacency-list graph; directed by default, undirected with ``directed=False``."""

    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as it's first adjacent vertex also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as it's first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as it's first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self
def __repr__( self ) -> str:
return pformat(self.adj_list )
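
# A short usage demo of the class above; `add_edge` returns `self`, so calls chain.
if __name__ == '__main__':
    graph = GraphAdjacencyList[int]()
    graph.add_edge(0, 1).add_edge(1, 2).add_edge(0, 2)
    print(graph)  # {0: [1, 2], 1: [2], 2: []}

    undirected = GraphAdjacencyList[str](directed=False)
    undirected.add_edge('a', 'b')
    print(undirected)  # {'a': ['b'], 'b': ['a']}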
| 645
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase_ :
def __init__( self , a=False , a=False , a=6.0 , a=None , a=False , a=False , a=None , a="fp4" , a=False , **a , ) -> Tuple:
lowercase__ : str = load_in_abit
lowercase__ : str = load_in_abit
lowercase__ : List[str] = llm_inta_threshold
lowercase__ : Dict = llm_inta_skip_modules
lowercase__ : Tuple = llm_inta_enable_fpaa_cpu_offload
lowercase__ : Any = llm_inta_has_fpaa_weight
lowercase__ : Any = bnb_abit_quant_type
lowercase__ : Dict = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
lowercase__ : Dict = torch.floataa
elif isinstance(a , a ):
lowercase__ : Any = getattr(a , a )
elif isinstance(a , torch.dtype ):
lowercase__ : Any = bnb_abit_compute_dtype
else:
raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype' )
self.post_init()
def _UpperCAmelCase ( self ) -> str:
if not isinstance(self.llm_inta_threshold , a ):
raise ValueError('llm_int8_threshold must be a float' )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , a ):
raise ValueError('llm_int8_skip_modules must be a list of strings' )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , a ):
raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean' )
if not isinstance(self.llm_inta_has_fpaa_weight , a ):
raise ValueError('llm_int8_has_fp16_weight must be a boolean' )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError('bnb_4bit_compute_dtype must be torch.dtype' )
if not isinstance(self.bnb_abit_quant_type , a ):
raise ValueError('bnb_4bit_quant_type must be a string' )
if not isinstance(self.bnb_abit_use_double_quant , a ):
raise ValueError('bnb_4bit_use_double_quant must be a boolean' )
        if self.load_in_4bit and not version.parse(importlib.metadata.version('bitsandbytes' ) ) >= version.parse(
'0.39.0' ):
raise ValueError(
'4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version' )
def _UpperCAmelCase ( self ) -> Tuple:
        return self.load_in_8bit or self.load_in_4bit
def _UpperCAmelCase ( self ) -> List[str]:
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_abit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def _UpperCAmelCase ( cls , a , a , **a ) -> Optional[Any]:
lowercase__ : List[Any] = cls(**a )
lowercase__ : Union[str, Any] = []
for key, value in kwargs.items():
if hasattr(a , a ):
setattr(a , a , a )
to_remove.append(a )
for key in to_remove:
kwargs.pop(a , a )
if return_unused_kwargs:
return config, kwargs
else:
return config
def _UpperCAmelCase ( self , a ) -> Dict:
with open(a , 'w' , encoding='utf-8' ) as writer:
lowercase__ : Any = self.to_dict()
lowercase__ : str = json.dumps(a , indent=2 , sort_keys=a ) + '\n'
writer.write(a )
def _UpperCAmelCase ( self ) -> Dict[str, Any]:
lowercase__ : Optional[Any] = copy.deepcopy(self.__dict__ )
lowercase__ : Any = str(output['bnb_4bit_compute_dtype'] ).split('.' )[1]
return output
def __repr__( self ) -> Dict:
return f"""{self.__class__.__name__} {self.to_json_string()}"""
def _UpperCAmelCase ( self , a = True ) -> str:
if use_diff is True:
lowercase__ : List[Any] = self.to_diff_dict()
else:
lowercase__ : List[str] = self.to_dict()
return json.dumps(a , indent=2 , sort_keys=a ) + "\n"
def _UpperCAmelCase ( self ) -> Dict[str, Any]:
lowercase__ : Tuple = self.to_dict()
# get the default config dict
lowercase__ : Optional[Any] = BitsAndBytesConfig().to_dict()
lowercase__ : int = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
lowercase__ : Optional[int] = value
return serializable_config_dict
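    # A short, hedged sketch of the diff serialization above (the keyword
    # argument and method names are the un-obfuscated originals, used here
    # only for readability, since parameters and methods are obfuscated in
    # this file):
    #
    #   config = UpperCAmelCase_(bnb_abit_use_double_quant=True)
    #   config.to_diff_dict()    # only the keys that differ from a default
    #                            # instance are serialized
    #   config.to_json_string()  # the same diff, pretty-printed as JSON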
| 645
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=[3_0, 3_0] , a=2 , a=3 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=3 , a=None , a=8 , a=1_0 , ) -> Any:
lowercase__ : List[str] = parent
lowercase__ : Optional[Any] = batch_size
lowercase__ : Optional[int] = image_size
lowercase__ : List[Any] = patch_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : str = is_training
lowercase__ : Optional[Any] = use_labels
lowercase__ : Optional[Any] = hidden_size
lowercase__ : Dict = num_hidden_layers
lowercase__ : Optional[Any] = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : List[Any] = hidden_act
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : Any = attention_probs_dropout_prob
lowercase__ : Any = type_sequence_label_size
lowercase__ : Dict = initializer_range
lowercase__ : Union[str, Any] = num_labels
lowercase__ : Tuple = scope
lowercase__ : Tuple = n_targets
lowercase__ : Optional[int] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
lowercase__ : Optional[Any] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
lowercase__ : Tuple = num_patches + 1 + self.num_detection_tokens
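        # Worked example with the defaults above: a 30x30 image with
        # patch_size=2 gives (30 // 2) * (30 // 2) = 225 patches, so the
        # expected sequence length is 225 + 1 ([CLS]) + 10 (detection
        # tokens) = 236.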
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowercase__ : Tuple = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowercase__ : int = []
for i in range(self.batch_size ):
lowercase__ : Optional[Any] = {}
lowercase__ : Any = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=a )
lowercase__ : List[str] = torch.rand(self.n_targets , 4 , device=a )
labels.append(a )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> List[Any]:
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def _UpperCAmelCase ( self , a , a , a ) -> int:
lowercase__ : List[str] = YolosModel(config=a )
model.to(a )
model.eval()
lowercase__ : List[Any] = model(a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def _UpperCAmelCase ( self , a , a , a ) -> Union[str, Any]:
lowercase__ : str = YolosForObjectDetection(a )
model.to(a )
model.eval()
lowercase__ : Dict = model(pixel_values=a )
lowercase__ : Tuple = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
lowercase__ : str = model(pixel_values=a , labels=a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : int = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Any = config_and_inputs
lowercase__ : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Optional[int] = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCamelCase__ : List[str] = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
lowerCamelCase__ : List[Any] = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : Union[str, Any] = False
def _UpperCAmelCase ( self , a , a , a=False ) -> Dict:
lowercase__ : List[str] = super()._prepare_for_class(a , a , return_labels=a )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
lowercase__ : Optional[Any] = []
for i in range(self.model_tester.batch_size ):
lowercase__ : Dict = {}
lowercase__ : Dict = torch.ones(
size=(self.model_tester.n_targets,) , device=a , dtype=torch.long )
lowercase__ : Optional[Any] = torch.ones(
self.model_tester.n_targets , 4 , device=a , dtype=torch.float )
labels.append(a )
lowercase__ : Union[str, Any] = labels
return inputs_dict
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Dict = YolosModelTester(self )
lowercase__ : Optional[int] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 )
def _UpperCAmelCase ( self ) -> str:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
# YOLOS does not use inputs_embeds
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : int = model_class(a )
lowercase__ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Tuple = [*signature.parameters.keys()]
lowercase__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Dict = True
# in YOLOS, the seq_len is different
lowercase__ : Tuple = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = True
lowercase__ : str = False
lowercase__ : str = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Any = model(**self._prepare_for_class(a , a ) )
lowercase__ : str = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : Optional[int] = True
lowercase__ : List[Any] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : List[str] = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowercase__ : Dict = len(a )
# Check attention is always last and order is fine
lowercase__ : Any = True
lowercase__ : int = True
lowercase__ : int = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Any = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[Any] = 1
self.assertEqual(out_len + added_hidden_states , len(a ) )
lowercase__ : Tuple = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _UpperCAmelCase ( self ) -> List[str]:
def check_hidden_states_output(a , a , a ):
lowercase__ : str = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : int = model(**self._prepare_for_class(a , a ) )
lowercase__ : int = outputs.hidden_states
lowercase__ : Any = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(a ) , a )
# YOLOS has a different seq_length
lowercase__ : Optional[int] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Any = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : List[Any] = True
check_hidden_states_output(a , a , a )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*a )
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : int = YolosModel.from_pretrained(a )
self.assertIsNotNone(a )
def a_ ( ):
'''simple docstring'''
lowercase__ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return AutoImageProcessor.from_pretrained('hustvl/yolos-small' ) if is_vision_available() else None
@slow
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = YolosForObjectDetection.from_pretrained('hustvl/yolos-small' ).to(a )
lowercase__ : Tuple = self.default_image_processor
lowercase__ : Optional[int] = prepare_img()
lowercase__ : int = image_processor(images=a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : int = model(inputs.pixel_values )
# verify outputs
lowercase__ : Tuple = torch.Size((1, 1_0_0, 9_2) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ : Any = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] , device=a , )
lowercase__ : List[str] = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] , device=a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , a , atol=1e-4 ) )
# verify postprocessing
lowercase__ : Optional[Any] = image_processor.post_process_object_detection(
a , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
lowercase__ : str = torch.tensor([0.9_994, 0.9_790, 0.9_964, 0.9_972, 0.9_861] ).to(a )
lowercase__ : Any = [7_5, 7_5, 1_7, 6_3, 1_7]
lowercase__ : Optional[int] = torch.tensor([335.0_609, 79.3_848, 375.4_216, 187.2_495] ).to(a )
self.assertEqual(len(results['scores'] ) , 5 )
self.assertTrue(torch.allclose(results['scores'] , a , atol=1e-4 ) )
self.assertSequenceEqual(results['labels'].tolist() , a )
self.assertTrue(torch.allclose(results['boxes'][0, :] , a ) )
| 645
|
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_UpperCamelCase : int = 16
_UpperCamelCase : Union[str, Any] = 32
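# The helper below converts a byte count to whole mebibytes
# (2**20 bytes = 1 MiB); it is used to report CUDA memory deltas.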
def a_ ( _lowerCAmelCase : Tuple ):
'''simple docstring'''
    return int(_lowerCAmelCase / 2**20 )
class UpperCAmelCase_ :
def __enter__( self ) -> Union[str, Any]:
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
lowercase__ : List[str] = torch.cuda.memory_allocated()
return self
def __exit__( self , *a ) -> Any:
gc.collect()
torch.cuda.empty_cache()
lowercase__ : Optional[Any] = torch.cuda.memory_allocated()
lowercase__ : Union[str, Any] = torch.cuda.max_memory_allocated()
lowercase__ : List[Any] = bamb(self.end - self.begin )
lowercase__ : List[Any] = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def a_ ( _lowerCAmelCase : Accelerator , _lowerCAmelCase : int = 16 , _lowerCAmelCase : str = "bert-base-cased" , _lowerCAmelCase : int = 320 , _lowerCAmelCase : int = 160 , ):
'''simple docstring'''
lowercase__ : List[Any] = AutoTokenizer.from_pretrained(_lowerCAmelCase )
lowercase__ : Union[str, Any] = load_dataset(
'glue' , 'mrpc' , split={'train': f"""train[:{n_train}]""", 'validation': f"""validation[:{n_val}]"""} )
def tokenize_function(_lowerCAmelCase : int ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : List[str] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase__ : Union[str, Any] = datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_lowerCAmelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ : Union[str, Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_lowerCAmelCase : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowerCAmelCase , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(_lowerCAmelCase , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
lowercase__ : Dict = DataLoader(
tokenized_datasets['train'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
lowercase__ : Dict = DataLoader(
tokenized_datasets['validation'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
return train_dataloader, eval_dataloader
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : List[str] ):
'''simple docstring'''
lowercase__ : List[Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ : Optional[int] = config['lr']
lowercase__ : Optional[Any] = int(config['num_epochs'] )
lowercase__ : Optional[Any] = int(config['seed'] )
lowercase__ : int = int(config['batch_size'] )
lowercase__ : Union[str, Any] = args.model_name_or_path
set_seed(_lowerCAmelCase )
lowercase__ , lowercase__ : Tuple = get_dataloaders(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ : List[Any] = AutoModelForSequenceClassification.from_pretrained(_lowerCAmelCase , return_dict=_lowerCAmelCase )
# Instantiate optimizer
lowercase__ : List[Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowercase__ : Optional[Any] = optimizer_cls(params=model.parameters() , lr=_lowerCAmelCase )
if accelerator.state.deepspeed_plugin is not None:
lowercase__ : Optional[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
lowercase__ : List[Any] = 1
lowercase__ : List[Any] = (len(_lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowercase__ : Optional[int] = get_linear_schedule_with_warmup(
optimizer=_lowerCAmelCase , num_warmup_steps=0 , num_training_steps=_lowerCAmelCase , )
else:
lowercase__ : Tuple = DummyScheduler(_lowerCAmelCase , total_num_steps=_lowerCAmelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = accelerator.prepare(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# We need to keep track of how many total steps we have iterated over
lowercase__ : Optional[int] = 0
# We also need to keep track of the stating epoch so files are named properly
lowercase__ : Tuple = 0
# Now we train the model
lowercase__ : Optional[Any] = {}
for epoch in range(_lowerCAmelCase , _lowerCAmelCase ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(_lowerCAmelCase ):
lowercase__ : List[Any] = model(**_lowerCAmelCase )
lowercase__ : Dict = outputs.loss
lowercase__ : int = loss / gradient_accumulation_steps
accelerator.backward(_lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) )
accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) )
accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) )
accelerator.print(
'Total Peak Memory consumed during the train (max): {}'.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
lowercase__ : int = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
def a_ ( ):
'''simple docstring'''
lowercase__ : int = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=_lowerCAmelCase , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_lowerCAmelCase , )
parser.add_argument(
'--output_dir' , type=_lowerCAmelCase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--peak_memory_upper_bound' , type=_lowerCAmelCase , default=_lowerCAmelCase , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , )
parser.add_argument(
'--n_train' , type=_lowerCAmelCase , default=320 , help='Number of training examples to use.' , )
parser.add_argument(
'--n_val' , type=_lowerCAmelCase , default=160 , help='Number of validation examples to use.' , )
parser.add_argument(
'--num_epochs' , type=_lowerCAmelCase , default=1 , help='Number of train epochs.' , )
lowercase__ : Any = parser.parse_args()
lowercase__ : Optional[Any] = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(_lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
main()
| 645
| 1
|
"""simple docstring"""
def a_ ( _lowerCAmelCase : list ):
'''simple docstring'''
if len(_lowerCAmelCase ) <= 1:
return [tuple(_lowerCAmelCase )]
lowercase__ : Optional[Any] = []
def generate(_lowerCAmelCase : int , _lowerCAmelCase : list ):
lowercase__ : Optional[int] = [0] * n
res.append(tuple(_lowerCAmelCase ) )
lowercase__ : List[Any] = 0
while i < n:
if c[i] < i:
if i % 2 == 0:
lowercase__ , lowercase__ : int = arr[i], arr[0]
else:
lowercase__ , lowercase__ : str = arr[i], arr[c[i]]
res.append(tuple(_lowerCAmelCase ) )
c[i] += 1
lowercase__ : int = 0
else:
lowercase__ : List[Any] = 0
i += 1
generate(len(_lowerCAmelCase ) , _lowerCAmelCase )
return res
if __name__ == "__main__":
_UpperCamelCase : Dict = input("Enter numbers separated by a comma:\n").strip()
_UpperCamelCase : Any = [int(item) for item in user_input.split(",")]
print(heaps(arr))
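# Example run of the iterative Heap's algorithm above (assuming the obfuscated
# tuple assignments perform the usual Heap swaps arr[i] <-> arr[0] and
# arr[i] <-> arr[c[i]]): for the input "1,2,3" it prints all 3! = 6
# permutations, in the order
# [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)].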
| 645
|
"""simple docstring"""
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : Any = [0] * len(_lowerCAmelCase )
for i in range(1 , len(_lowerCAmelCase ) ):
# use last results for better performance - dynamic programming
lowercase__ : List[str] = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
lowercase__ : Dict = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
lowercase__ : Union[str, Any] = j
return prefix_result
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
return max(prefix_function(_lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
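# Worked example for the prefix (failure) function above (both functions are
# named a_ in this file; descriptive names are used here for readability):
#   prefix_function("aabcdaabc") -> [0, 1, 0, 0, 0, 1, 2, 3, 4]
#   longest_prefix("aabcdaabc") -> 4   (the longest border is "aabc")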
| 645
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : int = ["pixel_values"]
def __init__( self , a = True , a = None , a = None , a = PILImageResampling.BILINEAR , a = True , a = 1 / 2_5_5 , a = True , a = None , a = None , **a , ) -> None:
super().__init__(**a )
lowercase__ : Union[str, Any] = size if size is not None else {'shortest_edge': 3_8_4}
lowercase__ : int = get_size_dict(a , default_to_square=a )
lowercase__ : List[Any] = do_resize
lowercase__ : Union[str, Any] = size
# Default value set here for backwards compatibility where the value in config is None
lowercase__ : int = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
lowercase__ : List[str] = resample
lowercase__ : List[Any] = do_rescale
lowercase__ : Any = rescale_factor
lowercase__ : List[str] = do_normalize
lowercase__ : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase__ : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _UpperCAmelCase ( self , a , a , a , a = PILImageResampling.BICUBIC , a = None , **a , ) -> np.ndarray:
lowercase__ : List[str] = get_size_dict(a , default_to_square=a )
if "shortest_edge" not in size:
raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
lowercase__ : Tuple = size['shortest_edge']
if shortest_edge < 3_8_4:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
lowercase__ : List[Any] = int(shortest_edge / crop_pct )
lowercase__ : List[Any] = get_resize_output_image_size(a , size=a , default_to_square=a )
lowercase__ : Optional[Any] = resize(image=a , size=a , resample=a , data_format=a , **a )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=a , size=(shortest_edge, shortest_edge) , data_format=a , **a )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
a , size=(shortest_edge, shortest_edge) , resample=a , data_format=a , **a )
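    # Worked example of the resize logic above: for a target
    # size={"shortest_edge": 224} with the default crop_pct = 224 / 256, the
    # image is first resized so its shortest edge is int(224 / (224 / 256)) =
    # 256 and then center-cropped to 224x224; for targets of 384 or larger
    # the image is simply warped to (shortest_edge, shortest_edge).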
def _UpperCAmelCase ( self , a , a , a = None , **a , ) -> Dict:
return rescale(a , scale=a , data_format=a , **a )
def _UpperCAmelCase ( self , a , a , a , a = None , **a , ) -> np.ndarray:
return normalize(a , mean=a , std=a , data_format=a , **a )
def _UpperCAmelCase ( self , a , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = ChannelDimension.FIRST , **a , ) -> PIL.Image.Image:
lowercase__ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
lowercase__ : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct
lowercase__ : str = resample if resample is not None else self.resample
lowercase__ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ : List[str] = do_normalize if do_normalize is not None else self.do_normalize
lowercase__ : Tuple = image_mean if image_mean is not None else self.image_mean
lowercase__ : Union[str, Any] = image_std if image_std is not None else self.image_std
lowercase__ : Dict = size if size is not None else self.size
lowercase__ : str = get_size_dict(a , default_to_square=a )
lowercase__ : List[Any] = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
raise ValueError('crop_pct must be specified if size < 384.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowercase__ : Union[str, Any] = [to_numpy_array(a ) for image in images]
if do_resize:
lowercase__ : str = [self.resize(image=a , size=a , crop_pct=a , resample=a ) for image in images]
if do_rescale:
lowercase__ : List[Any] = [self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
lowercase__ : int = [self.normalize(image=a , mean=a , std=a ) for image in images]
lowercase__ : Tuple = [to_channel_dimension_format(a , a ) for image in images]
lowercase__ : List[str] = {'pixel_values': images}
return BatchFeature(data=a , tensor_type=a )
| 645
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self , a , a=7 , a=3 , a=1_8 , a=3_0 , a=4_0_0 , a=True , a=None , a=True , a=None , a=True , ) -> List[str]:
lowercase__ : Tuple = size if size is not None else {'shortest_edge': 2_0}
lowercase__ : Union[str, Any] = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8}
lowercase__ : Optional[int] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : str = num_channels
lowercase__ : Any = image_size
lowercase__ : Optional[Any] = min_resolution
lowercase__ : int = max_resolution
lowercase__ : List[Any] = do_resize
lowercase__ : List[str] = size
lowercase__ : str = do_center_crop
lowercase__ : List[Any] = crop_size
lowercase__ : Union[str, Any] = do_flip_channel_order
def _UpperCAmelCase ( self ) -> int:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Optional[Any] = MobileViTImageProcessor if is_vision_available() else None
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Tuple = MobileViTImageProcessingTester(self )
@property
def _UpperCAmelCase ( self ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , 'do_resize' ) )
self.assertTrue(hasattr(a , 'size' ) )
self.assertTrue(hasattr(a , 'do_center_crop' ) )
self.assertTrue(hasattr(a , 'center_crop' ) )
self.assertTrue(hasattr(a , 'do_flip_channel_order' ) )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 2_0} )
self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8} )
lowercase__ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {'shortest_edge': 4_2} )
self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4} )
def _UpperCAmelCase ( self ) -> Tuple:
pass
def _UpperCAmelCase ( self ) -> str:
# Initialize image_processing
lowercase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : List[Any] = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _UpperCAmelCase ( self ) -> Tuple:
# Initialize image_processing
lowercase__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
lowercase__ : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : Any = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _UpperCAmelCase ( self ) -> Dict:
# Initialize image_processing
lowercase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowercase__ : Tuple = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 645
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCamelCase : List[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : List[Any] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Any = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
_UpperCamelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 645
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=True , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=4 , ) -> Dict:
lowercase__ : Optional[Any] = parent
lowercase__ : Dict = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : int = is_training
lowercase__ : str = use_attention_mask
lowercase__ : Dict = use_token_type_ids
lowercase__ : Optional[int] = use_labels
lowercase__ : Tuple = vocab_size
lowercase__ : List[str] = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : int = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : List[str] = hidden_act
lowercase__ : Dict = hidden_dropout_prob
lowercase__ : Tuple = attention_probs_dropout_prob
lowercase__ : List[str] = max_position_embeddings
lowercase__ : int = type_vocab_size
lowercase__ : List[str] = type_sequence_label_size
lowercase__ : Union[str, Any] = initializer_range
lowercase__ : Optional[int] = num_choices
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = None
if self.use_attention_mask:
lowercase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : List[str] = None
if self.use_token_type_ids:
lowercase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ : Any = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Optional[int] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs
lowercase__ : Union[str, Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Tuple = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Union[str, Any] = FlaxAlbertModelTester(self )
@slow
def _UpperCAmelCase ( self ) -> str:
for model_class_name in self.all_model_classes:
lowercase__ : str = model_class_name.from_pretrained('albert-base-v2' )
lowercase__ : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(a )
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : str = FlaxAlbertModel.from_pretrained('albert-base-v2' )
lowercase__ : Optional[int] = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowercase__ : Optional[Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowercase__ : Any = model(a , attention_mask=a )[0]
lowercase__ : Tuple = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , a )
lowercase__ : Optional[Any] = np.array(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
| 645
| 1
|
"""simple docstring"""
def a_ ( _lowerCAmelCase : int ):
'''simple docstring'''
    if _lowerCAmelCase < 0:
        raise ValueError('Input value must be a positive integer' )
    elif not isinstance(_lowerCAmelCase , int ):
        raise TypeError('Input value must be a \'int\' type' )
return bin(_lowerCAmelCase ).count('1' )
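# Example: bin(25) == '0b11001', so the function above returns 3 for 25.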
if __name__ == "__main__":
import doctest
doctest.testmod()
| 645
|
"""simple docstring"""
from collections.abc import Sequence
def a_ ( _lowerCAmelCase : Sequence[float] , _lowerCAmelCase : float ):
'''simple docstring'''
return sum(c * (x**i) for i, c in enumerate(_lowerCAmelCase ) )
def a_ ( _lowerCAmelCase : Sequence[float] , _lowerCAmelCase : float ):
'''simple docstring'''
lowercase__ : int = 0.0
for coeff in reversed(_lowerCAmelCase ):
lowercase__ : List[Any] = result * x + coeff
return result
if __name__ == "__main__":
_UpperCamelCase : int = (0.0, 0.0, 5.0, 9.3, 7.0)
_UpperCamelCase : Dict = 1_0.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
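# Both calls print 79800.0 (= 5.0*10**2 + 9.3*10**3 + 7.0*10**4). Horner's
# rule evaluates the same polynomial as (((7.0*x + 9.3)*x + 5.0)*x + 0.0)*x
# + 0.0, one multiplication per coefficient instead of recomputing each
# power of x.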
| 645
| 1
|
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def a_ ( _lowerCAmelCase : List[Any] ):
'''simple docstring'''
lowercase__ : List[str] = SwinConfig()
lowercase__ : Optional[Any] = swin_name.split('_' )
lowercase__ : int = name_split[1]
lowercase__ : Tuple = int(name_split[4] )
lowercase__ : Optional[int] = int(name_split[3][-1] )
if model_size == "tiny":
lowercase__ : Tuple = 96
lowercase__ : List[Any] = (2, 2, 6, 2)
lowercase__ : Tuple = (3, 6, 12, 24)
elif model_size == "small":
lowercase__ : Optional[Any] = 96
lowercase__ : List[Any] = (2, 2, 18, 2)
lowercase__ : Union[str, Any] = (3, 6, 12, 24)
elif model_size == "base":
lowercase__ : int = 128
lowercase__ : Any = (2, 2, 18, 2)
lowercase__ : Any = (4, 8, 16, 32)
else:
lowercase__ : List[str] = 192
lowercase__ : str = (2, 2, 18, 2)
lowercase__ : Any = (6, 12, 24, 48)
if "in22k" in swin_name:
lowercase__ : Optional[Any] = 2_1841
else:
lowercase__ : str = 1000
lowercase__ : int = 'huggingface/label-files'
lowercase__ : Optional[Any] = 'imagenet-1k-id2label.json'
lowercase__ : List[Any] = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type='dataset' ) , 'r' ) )
lowercase__ : str = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
lowercase__ : str = idalabel
lowercase__ : str = {v: k for k, v in idalabel.items()}
lowercase__ : Tuple = img_size
lowercase__ : Tuple = num_classes
lowercase__ : str = embed_dim
lowercase__ : List[Any] = depths
lowercase__ : Dict = num_heads
lowercase__ : Any = window_size
return config
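# Example: "swin_tiny_patch4_window7_224" splits into
# ["swin", "tiny", "patch4", "window7", "224"], giving model_size = "tiny",
# img_size = 224 and window_size = 7 (the last character of "window7").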
def a_ ( _lowerCAmelCase : List[str] ):
'''simple docstring'''
if "patch_embed.proj" in name:
lowercase__ : Optional[Any] = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
lowercase__ : Optional[int] = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
lowercase__ : Any = 'encoder.' + name
if "attn.proj" in name:
lowercase__ : str = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
lowercase__ : int = name.replace('attn' , 'attention.self' )
if "norm1" in name:
lowercase__ : List[Any] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
lowercase__ : Tuple = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
lowercase__ : Optional[Any] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
lowercase__ : List[str] = name.replace('mlp.fc2' , 'output.dense' )
if name == "norm.weight":
lowercase__ : Tuple = 'layernorm.weight'
if name == "norm.bias":
lowercase__ : Optional[int] = 'layernorm.bias'
if "head" in name:
lowercase__ : Union[str, Any] = name.replace('head' , 'classifier' )
else:
lowercase__ : Dict = 'swin.' + name
return name
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowercase__ : Union[str, Any] = orig_state_dict.pop(_lowerCAmelCase )
if "mask" in key:
continue
elif "qkv" in key:
lowercase__ : List[str] = key.split('.' )
lowercase__ : List[str] = int(key_split[1] )
lowercase__ : List[Any] = int(key_split[3] )
lowercase__ : List[str] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowercase__ : str = val[:dim, :]
lowercase__ : Optional[int] = val[
dim : dim * 2, :
]
lowercase__ : Tuple = val[-dim:, :]
else:
lowercase__ : List[str] = val[
:dim
]
lowercase__ : Tuple = val[
dim : dim * 2
]
lowercase__ : Union[str, Any] = val[
-dim:
]
else:
lowercase__ : Union[str, Any] = val
return orig_state_dict
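# The "qkv" branch above splits timm's fused query/key/value projection into
# the separate tensors the HF model expects: for a weight of shape
# (3 * dim, dim), rows [:dim] become the query, rows [dim : 2 * dim] the key,
# and rows [-dim:] the value (and likewise for the bias vectors).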
def a_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] ):
'''simple docstring'''
lowercase__ : Union[str, Any] = timm.create_model(_lowerCAmelCase , pretrained=_lowerCAmelCase )
timm_model.eval()
lowercase__ : Optional[Any] = get_swin_config(_lowerCAmelCase )
lowercase__ : Any = SwinForImageClassification(_lowerCAmelCase )
model.eval()
lowercase__ : Dict = convert_state_dict(timm_model.state_dict() , _lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
lowercase__ : Any = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowercase__ : int = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
lowercase__ : List[str] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
lowercase__ : List[str] = image_processor(images=_lowerCAmelCase , return_tensors='pt' )
lowercase__ : Tuple = timm_model(inputs['pixel_values'] )
lowercase__ : Optional[Any] = model(**_lowerCAmelCase ).logits
assert torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 )
print(f"""Saving model {swin_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCAmelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_UpperCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swin_name",
default="swin_tiny_patch4_window7_224",
type=str,
help="Name of the Swin timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_UpperCamelCase : Any = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 645
|
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
_UpperCamelCase : Any = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def a_ ( _lowerCAmelCase : Optional[Any]=True ):
'''simple docstring'''
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
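# With with_config=True this helper yields parameter dicts such as
# {"testcase_name": "wikipedia/20220301.en", "dataset": "wikipedia",
#  "config_name": "20220301.en"}; with with_config=False it deduplicates to
# one entry per dataset name.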
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_a))
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : str = None
lowerCamelCase__ : Optional[Any] = None
def _UpperCAmelCase ( self , a , a ) -> List[Any]:
with TemporaryDirectory() as tmp_dir:
lowercase__ : List[str] = dataset_module_factory(a , cache_dir=a )
lowercase__ : List[Any] = import_main_class(dataset_module.module_path , dataset=a )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=a , config_name=a , hash=dataset_module.hash , )
lowercase__ : Union[str, Any] = '/'.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=a ).replace(os.sep , '/' ),
config.DATASET_INFO_FILENAME,
] )
lowercase__ : Union[str, Any] = cached_path(a , cache_dir=a )
self.assertTrue(os.path.exists(a ) )
@pytest.mark.integration
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : Union[str, Any] = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple'
lowercase__ : int = dataset_module_factory('wikipedia' , cache_dir=_lowerCAmelCase )
lowercase__ : Optional[int] = import_main_class(dataset_module.module_path )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=_lowerCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
lowercase__ : Optional[int] = None
builder_instance.download_and_prepare()
lowercase__ : Optional[int] = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def a_ ( _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Optional[int] = dataset_module_factory('wikipedia' , cache_dir=_lowerCAmelCase )
lowercase__ : List[str] = import_main_class(dataset_module.module_path , dataset=_lowerCAmelCase )
lowercase__ : DatasetBuilder = builder_cls(
cache_dir=_lowerCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
lowercase__ : Union[str, Any] = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert "train" in ds
assert isinstance(ds['train'] , _lowerCAmelCase )
assert next(iter(ds['train'] ) )
| 645
| 1
|
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 645
|
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def a_ ( _lowerCAmelCase : dict ):
'''simple docstring'''
return (data["data"], data["target"])
def a_ ( _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray ):
'''simple docstring'''
lowercase__ : Any = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(_lowerCAmelCase , _lowerCAmelCase )
# Predict target for test data
lowercase__ : str = xgb.predict(_lowerCAmelCase )
lowercase__ : Union[str, Any] = predictions.reshape(len(_lowerCAmelCase ) , 1 )
return predictions
def a_ ( ):
'''simple docstring'''
lowercase__ : Optional[Any] = fetch_california_housing()
lowercase__ , lowercase__ : str = data_handling(_lowerCAmelCase )
lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = train_test_split(
_lowerCAmelCase , _lowerCAmelCase , test_size=0.2_5 , random_state=1 )
lowercase__ : Any = xgboost(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Error printing
print(f"""Mean Absolute Error : {mean_absolute_error(_lowerCAmelCase , _lowerCAmelCase )}""" )
print(f"""Mean Square Error : {mean_squared_error(_lowerCAmelCase , _lowerCAmelCase )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 645
| 1
|
"""simple docstring"""
_UpperCamelCase : int = range(2, 20 + 1)
_UpperCamelCase : Union[str, Any] = [10**k for k in range(ks[-1] + 1)]
_UpperCamelCase : dict[int, dict[int, list[list[int]]]] = {}
def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : int ):
'''simple docstring'''
lowercase__ : int = sum(a_i[j] for j in range(_lowerCAmelCase , len(_lowerCAmelCase ) ) )
lowercase__ : str = sum(a_i[j] * base[j] for j in range(min(len(_lowerCAmelCase ) , _lowerCAmelCase ) ) )
lowercase__ , lowercase__ : Optional[Any] = 0, 0
lowercase__ : List[Any] = n - i
lowercase__ : str = memo.get(_lowerCAmelCase )
if sub_memo is not None:
lowercase__ : List[Any] = sub_memo.get(_lowerCAmelCase )
if jumps is not None and len(_lowerCAmelCase ) > 0:
# find and make the largest jump without going over
lowercase__ : List[Any] = -1
for _k in range(len(_lowerCAmelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
lowercase__ : Optional[Any] = _k
break
if max_jump >= 0:
lowercase__ , lowercase__ , lowercase__ : str = jumps[max_jump]
# since the difference between jumps is cached, add c
lowercase__ : List[Any] = diff + c
for j in range(min(_lowerCAmelCase , len(_lowerCAmelCase ) ) ):
lowercase__ , lowercase__ : List[str] = divmod(_lowerCAmelCase , 10 )
if new_c > 0:
add(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
lowercase__ : Optional[Any] = []
else:
lowercase__ : Optional[int] = {c: []}
lowercase__ : str = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
lowercase__ , lowercase__ : Any = next_term(_lowerCAmelCase , k - 1 , i + dn , _lowerCAmelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
lowercase__ , lowercase__ : List[Any] = compute(_lowerCAmelCase , _lowerCAmelCase , i + dn , _lowerCAmelCase )
diff += _diff
dn += terms_jumped
lowercase__ : str = sub_memo[c]
# keep jumps sorted by # of terms skipped
lowercase__ : int = 0
while j < len(_lowerCAmelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(_lowerCAmelCase , (diff, dn, k) )
return (diff, dn)


def compute(a_i, k, i, n):
    """
    Same as next_term(a_i, k, i, n) but computes terms sequentially without
    memoizing results, stopping once the carry spills past index k.
    """
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i
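

# Illustrative helper (an assumption, not part of the original solution) that
# makes the little-endian digit representation used by `next_term`, `compute`
# and `add` explicit: digits[j] holds the coefficient of 10**j, so the list
# [2, 6] encodes the integer 62.
def digits_to_int(digits: list[int]) -> int:
    """Convert a little-endian digit list back into an integer."""
    return sum(digit * 10**j for j, digit in enumerate(digits))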


def add(digits, k, addend):
    """
    Adds addend to the digit array given in digits, starting at index k,
    propagating carries and appending new high digits as needed.
    """
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    """
    Returns the n-th term of the digit sum sequence starting at 1.

    >>> solution(10)
    62
    """
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped

        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
    print(f"{solution() = }")
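

# A brute-force cross-check of the optimized solution above (illustrative,
# not part of the original file): the sequence satisfies
# a(n) = a(n - 1) + digitsum(a(n - 1)), so small indices can be computed
# directly. `brute_force` is a hypothetical helper name.
def brute_force(n: int) -> int:
    """Return the same term as solution(n) by naive iteration."""
    term = 1
    for _ in range(n - 1):
        term += sum(int(digit) for digit in str(term))
    return term


# Expected: brute_force(10) == solution(10) == 62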
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame

        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            tubelet_size=self.tubelet_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()

        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
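

# Quick sanity check (illustrative; not part of the original test suite) of
# the token arithmetic above: with the default tester arguments there are
# (10 // 2) ** 2 = 25 patches per frame and 2 // 2 = 1 tubelet group, so
# seq_length = 25 and num_masks = int(0.9 * 25) = 22 masked patches per video.
_sanity_tester = VideoMAEModelTester(parent=None)
assert _sanity_tester.seq_length == 25
assert _sanity_tester.num_masks == 22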


@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)

        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="VideoMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass

        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions

                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
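

# To run just this suite locally, an illustrative command (assuming a source
# checkout of the transformers repository with the test dependencies
# installed):
#
#   python -m pytest tests/models/videomae/test_modeling_videomae.py -v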


# We will verify our results on a video of eating spaghetti
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)


@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_for_pretraining(self):
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(torch_device)

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device
        )
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to(
            torch_device
        )

        with torch.no_grad():
            outputs = model(**inputs)

        expected_loss = torch.tensor([0.6469], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
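

# Minimal inference sketch (illustrative; mirrors the integration test above
# and assumes network access to the Hugging Face Hub for the checkpoint):
#
#   from transformers import VideoMAEImageProcessor, VideoMAEForVideoClassification
#
#   video = prepare_video()  # list of numpy frames from the spaghetti clip
#   processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
#   model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")
#   inputs = processor(video, return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits
#   print(model.config.id2label[int(logits.argmax(-1))])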