code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str=13 , UpperCamelCase__ : List[Any]=7 , UpperCamelCase__ : Any=True , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : List[Any]=99 , UpperCamelCase__ : List[Any]=64 , UpperCamelCase__ : Optional[Any]=32 , UpperCamelCase__ : Dict=5 , UpperCamelCase__ : str=4 , UpperCamelCase__ : List[str]=37 , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Tuple=512 , UpperCamelCase__ : int=16 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : Any=4 , UpperCamelCase__ : int=None , ) -> List[Any]:
"""simple docstring"""
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = seq_length
__magic_name__ = is_training
__magic_name__ = use_input_mask
__magic_name__ = use_token_type_ids
__magic_name__ = use_labels
__magic_name__ = vocab_size
__magic_name__ = hidden_size
__magic_name__ = embedding_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = max_position_embeddings
__magic_name__ = type_vocab_size
__magic_name__ = type_sequence_label_size
__magic_name__ = initializer_range
__magic_name__ = num_labels
__magic_name__ = num_choices
__magic_name__ = scope
def _lowercase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ = None
if self.use_input_mask:
__magic_name__ = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ = None
if self.use_token_type_ids:
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
if self.use_labels:
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )
def _lowercase ( self : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any , UpperCamelCase__ : Dict ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ = MobileBertModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
__magic_name__ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
__magic_name__ = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowercase ( self : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ = MobileBertForMaskedLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__magic_name__ = MobileBertForNextSentencePrediction(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__magic_name__ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _lowercase ( self : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] ) -> Dict:
"""simple docstring"""
__magic_name__ = MobileBertForPreTraining(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__magic_name__ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , next_sentence_label=UpperCamelCase__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _lowercase ( self : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any ) -> str:
"""simple docstring"""
__magic_name__ = MobileBertForQuestionAnswering(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__magic_name__ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase ( self : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : str ) -> int:
"""simple docstring"""
__magic_name__ = self.num_labels
__magic_name__ = MobileBertForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] ) -> str:
"""simple docstring"""
__magic_name__ = self.num_labels
__magic_name__ = MobileBertForTokenClassification(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ) -> Optional[int]:
"""simple docstring"""
__magic_name__ = self.num_choices
__magic_name__ = MobileBertForMultipleChoice(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__magic_name__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowercase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__magic_name__ = self.prepare_config_and_inputs()
(
(
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) ,
) = config_and_inputs
__magic_name__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _A , _A , unittest.TestCase ):
'''simple docstring'''
a__ = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
a__ = (
{
"""feature-extraction""": MobileBertModel,
"""fill-mask""": MobileBertForMaskedLM,
"""question-answering""": MobileBertForQuestionAnswering,
"""text-classification""": MobileBertForSequenceClassification,
"""token-classification""": MobileBertForTokenClassification,
"""zero-shot""": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ = True
def _lowercase ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any=False ) -> Tuple:
"""simple docstring"""
__magic_name__ = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if return_labels:
if model_class in get_values(UpperCamelCase__ ):
__magic_name__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCamelCase__ )
__magic_name__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ )
return inputs_dict
def _lowercase ( self : List[Any] ) -> str:
"""simple docstring"""
__magic_name__ = MobileBertModelTester(self )
__magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def _lowercase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowercase ( self : List[str] ) -> Any:
"""simple docstring"""
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*UpperCamelCase__ )
def _lowercase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*UpperCamelCase__ )
def _lowercase ( self : List[str] ) -> Tuple:
"""simple docstring"""
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*UpperCamelCase__ )
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*UpperCamelCase__ )
def _lowercase ( self : int ) -> int:
"""simple docstring"""
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*UpperCamelCase__ )
def _lowercase ( self : List[str] ) -> List[str]:
"""simple docstring"""
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*UpperCamelCase__ )
def _lowercase ( self : Dict ) -> Dict:
"""simple docstring"""
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*UpperCamelCase__ )
def _lowercase ( self : Any ) -> List[str]:
"""simple docstring"""
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*UpperCamelCase__ )
def a__ ( A_ ):
'''simple docstring'''
return torch.tensor(
A_, dtype=torch.long, device=A_, )
__lowerCAmelCase : Optional[Any] = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowercase ( self : str ) -> List[str]:
"""simple docstring"""
__magic_name__ = MobileBertModel.from_pretrained("""google/mobilebert-uncased""" ).to(UpperCamelCase__ )
__magic_name__ = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] )
with torch.no_grad():
__magic_name__ = model(UpperCamelCase__ )[0]
__magic_name__ = torch.Size((1, 9, 512) )
self.assertEqual(output.shape , UpperCamelCase__ )
__magic_name__ = torch.tensor(
[
[
[-2.473_6526E07, 8.269_1656E04, 1.652_1838E05],
[-5.754_1704E-01, 3.905_6022E00, 4.401_1507E00],
[2.604_7359E00, 1.567_7652E00, -1.732_4188E-01],
]
] , device=UpperCamelCase__ , )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
__magic_name__ = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
__magic_name__ = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
| 88 |
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class lowerCAmelCase__ :
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> Dict:
if dst_width < 0 or dst_height < 0:
raise ValueError('''Destination width/height should be > 0''' )
__lowerCamelCase = img
__lowerCamelCase = img.shape[1]
__lowerCamelCase = img.shape[0]
__lowerCamelCase = dst_width
__lowerCamelCase = dst_height
__lowerCamelCase = self.src_w / self.dst_w
__lowerCamelCase = self.src_h / self.dst_h
__lowerCamelCase = __lowerCamelCase = (
np.ones((self.dst_h, self.dst_w, 3) , np.uinta ) * 2_55
)
def __A ( self : List[Any] ) -> str:
for i in range(self.dst_h ):
for j in range(self.dst_w ):
__lowerCamelCase = self.img[self.get_y(SCREAMING_SNAKE_CASE__ )][self.get_x(SCREAMING_SNAKE_CASE__ )]
def __A ( self : str , SCREAMING_SNAKE_CASE__ : int ) -> int:
return int(self.ratio_x * x )
def __A ( self : str , SCREAMING_SNAKE_CASE__ : int ) -> int:
return int(self.ratio_y * y )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = 800, 600
SCREAMING_SNAKE_CASE__ : int = imread("image_data/lena.jpg", 1)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F'Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}', n.output
)
waitKey(0)
destroyAllWindows()
| 270 | 0 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_UpperCamelCase: Tuple = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
_UpperCamelCase: Union[str, Any] = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
_UpperCamelCase: Optional[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def lowercase__ ( _UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
with open(_UpperCAmelCase , 'rb' ) as f:
lowercase : Tuple = Image.open(_UpperCAmelCase )
return im.convert('RGB' )
@dataclass
class a__ :
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__, metadata={
'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
}, )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
_lowerCamelCase = field(default=SCREAMING_SNAKE_CASE__, metadata={'help': 'A folder containing the training data.'} )
_lowerCamelCase = field(default=SCREAMING_SNAKE_CASE__, metadata={'help': 'A folder containing the validation data.'} )
_lowerCamelCase = field(
default=0.1_5, metadata={'help': 'Percent to split off of train for validation.'} )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__, metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
}, )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__, metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
}, )
def lowercase ( self : Tuple ) -> Tuple:
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'You must specify either a dataset name from the hub or a train and/or validation directory.' )
@dataclass
class a__ :
_lowerCamelCase = field(
default='google/vit-base-patch16-224-in21k', metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}, )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__, metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(SCREAMING_SNAKE_CASE__ )}, )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__, metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
_lowerCamelCase = field(
default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}, )
_lowerCamelCase = field(default=SCREAMING_SNAKE_CASE__, metadata={'help': 'Name or path of preprocessor config.'} )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__, metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
}, )
_lowerCamelCase = field(
default=SCREAMING_SNAKE_CASE__, metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'}, )
def lowercase__ ( _UpperCAmelCase ) -> int:
'''simple docstring'''
lowercase : Optional[int] = torch.stack([example['pixel_values'] for example in examples] )
lowercase : Dict = torch.tensor([example['labels'] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def lowercase__ ( ) -> Any:
'''simple docstring'''
lowercase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase : Optional[Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_image_classification' , _UpperCAmelCase , _UpperCAmelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase : Optional[int] = training_args.get_process_log_level()
logger.setLevel(_UpperCAmelCase )
transformers.utils.logging.set_verbosity(_UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
lowercase : List[str] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase : List[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
lowercase : Optional[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='image-classification' , use_auth_token=True if model_args.use_auth_token else None , )
else:
lowercase : Optional[Any] = {}
if data_args.train_dir is not None:
lowercase : Optional[int] = os.path.join(data_args.train_dir , '**' )
if data_args.validation_dir is not None:
lowercase : Any = os.path.join(data_args.validation_dir , '**' )
lowercase : List[str] = load_dataset(
'imagefolder' , data_files=_UpperCAmelCase , cache_dir=model_args.cache_dir , task='image-classification' , )
# If we don't have a validation split, split off a percentage of train as validation.
lowercase : int = None if 'validation' in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , _UpperCAmelCase ) and data_args.train_val_split > 0.0:
lowercase : Optional[int] = dataset['train'].train_test_split(data_args.train_val_split )
lowercase : int = split['train']
lowercase : str = split['test']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
lowercase : Optional[int] = dataset['train'].features['labels'].names
lowercase : List[Any] = {}, {}
for i, label in enumerate(_UpperCAmelCase ):
lowercase : Optional[Any] = str(_UpperCAmelCase )
lowercase : str = label
# Load the accuracy metric from the datasets package
lowercase : Any = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_UpperCAmelCase ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
lowercase : List[Any] = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(_UpperCAmelCase ) , labelaid=_UpperCAmelCase , idalabel=_UpperCAmelCase , finetuning_task='image-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowercase : List[Any] = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
lowercase : List[Any] = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
lowercase : Union[str, Any] = image_processor.size['shortest_edge']
else:
lowercase : Optional[Any] = (image_processor.size['height'], image_processor.size['width'])
lowercase : int = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
lowercase : str = Compose(
[
RandomResizedCrop(_UpperCAmelCase ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
lowercase : Optional[Any] = Compose(
[
Resize(_UpperCAmelCase ),
CenterCrop(_UpperCAmelCase ),
ToTensor(),
normalize,
] )
def train_transforms(_UpperCAmelCase ):
lowercase : int = [
_train_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']
]
return example_batch
def val_transforms(_UpperCAmelCase ):
lowercase : Tuple = [_val_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
lowercase : int = (
dataset['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(_UpperCAmelCase )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
lowercase : Optional[int] = (
dataset['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(_UpperCAmelCase )
# Initalize our trainer
lowercase : Union[str, Any] = Trainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=dataset['train'] if training_args.do_train else None , eval_dataset=dataset['validation'] if training_args.do_eval else None , compute_metrics=_UpperCAmelCase , tokenizer=_UpperCAmelCase , data_collator=_UpperCAmelCase , )
# Training
if training_args.do_train:
lowercase : int = None
if training_args.resume_from_checkpoint is not None:
lowercase : Union[str, Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase : Tuple = last_checkpoint
lowercase : List[Any] = trainer.train(resume_from_checkpoint=_UpperCAmelCase )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowercase : str = trainer.evaluate()
trainer.log_metrics('eval' , _UpperCAmelCase )
trainer.save_metrics('eval' , _UpperCAmelCase )
# Write model card and (optionally) push to hub
lowercase : Optional[Any] = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'image-classification',
'dataset': data_args.dataset_name,
'tags': ['image-classification', 'vision'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_UpperCAmelCase )
else:
trainer.create_model_card(**_UpperCAmelCase )
if __name__ == "__main__":
main()
| 370 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCamelCase: Dict = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase: Optional[int] = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
_UpperCamelCase: List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 53 | 0 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class _A ( _a ):
"""simple docstring"""
UpperCAmelCase : List[str] = ["""image_processor""", """tokenizer"""]
UpperCAmelCase : str = """AutoImageProcessor"""
UpperCAmelCase : Optional[int] = """AutoTokenizer"""
def __init__( self : str , __UpperCAmelCase : Any=None , __UpperCAmelCase : str=None , **__UpperCAmelCase : Dict):
a : List[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __UpperCAmelCase , )
a : Tuple = kwargs.pop("feature_extractor")
a : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`.")
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`.")
super().__init__(__UpperCAmelCase , __UpperCAmelCase)
a : Any = self.image_processor
a : Any = False
def __call__( self : Optional[int] , *__UpperCAmelCase : List[Any] , **__UpperCAmelCase : int):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__UpperCAmelCase , **__UpperCAmelCase)
a : List[Any] = kwargs.pop("images" , __UpperCAmelCase)
a : Dict = kwargs.pop("text" , __UpperCAmelCase)
if len(__UpperCAmelCase) > 0:
a : Tuple = args[0]
a : List[str] = args[1:]
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process.")
if images is not None:
a : List[Any] = self.image_processor(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase)
if text is not None:
a : List[str] = self.tokenizer(__UpperCAmelCase , **__UpperCAmelCase)
if text is None:
return inputs
elif images is None:
return encodings
else:
a : Tuple = encodings["input_ids"]
return inputs
def __snake_case ( self : Optional[int] , *__UpperCAmelCase : List[str] , **__UpperCAmelCase : Optional[int]):
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase)
def __snake_case ( self : str , *__UpperCAmelCase : Optional[int] , **__UpperCAmelCase : Dict):
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase)
@contextmanager
def __snake_case ( self : List[str]):
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your images inputs, or in a separate call.")
a : Dict = True
a : Any = self.tokenizer
yield
a : Optional[Any] = self.image_processor
a : List[Any] = False
def __snake_case ( self : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : Union[str, Any]=False , __UpperCAmelCase : str=None):
if added_vocab is None:
a : Optional[int] = self.tokenizer.get_added_vocab()
a : List[Any] = {}
while tokens:
a : str = re.search(r"<s_(.*?)>" , __UpperCAmelCase , re.IGNORECASE)
if start_token is None:
break
a : Optional[int] = start_token.group(1)
a : Optional[Any] = re.search(rf'''</s_{key}>''' , __UpperCAmelCase , re.IGNORECASE)
a : Any = start_token.group()
if end_token is None:
a : Optional[Any] = tokens.replace(__UpperCAmelCase , "")
else:
a : int = end_token.group()
a : Tuple = re.escape(__UpperCAmelCase)
a : str = re.escape(__UpperCAmelCase)
a : Dict = re.search(f'''{start_token_escaped}(.*?){end_token_escaped}''' , __UpperCAmelCase , re.IGNORECASE)
if content is not None:
a : Tuple = content.group(1).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
a : Any = self.tokenajson(__UpperCAmelCase , is_inner_value=__UpperCAmelCase , added_vocab=__UpperCAmelCase)
if value:
if len(__UpperCAmelCase) == 1:
a : str = value[0]
a : Union[str, Any] = value
else: # leaf nodes
a : int = []
for leaf in content.split(r"<sep/>"):
a : List[str] = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
a : Union[str, Any] = leaf[1:-2] # for categorical special tokens
output[key].append(__UpperCAmelCase)
if len(output[key]) == 1:
a : Tuple = output[key][0]
a : List[Any] = tokens[tokens.find(__UpperCAmelCase) + len(__UpperCAmelCase) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=__UpperCAmelCase , added_vocab=__UpperCAmelCase)
if len(__UpperCAmelCase):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def __snake_case ( self : str):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __UpperCAmelCase , )
return self.image_processor_class
@property
def __snake_case ( self : int):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __UpperCAmelCase , )
return self.image_processor
| 40 |
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def lowercase ( A_ )-> List[Any]:
'''simple docstring'''
monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() )
@pytest.fixture
def lowercase ( A_ )-> Tuple:
'''simple docstring'''
class _A :
"""simple docstring"""
def __init__( self : str , __UpperCAmelCase : int):
a : List[Any] = metric_id
class _A :
"""simple docstring"""
UpperCAmelCase : Union[str, Any] = [MetricMock(_a ) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]]
def __snake_case ( self : List[str]):
return self._metrics
monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() )
@pytest.mark.parametrize(
"func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def lowercase ( A_ , A_ , A_ , A_ , A_ )-> Any:
'''simple docstring'''
if "tmp_path" in args:
a : Union[str, Any] = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
with pytest.warns(A_ , match="https://huggingface.co/docs/evaluate" ):
func(*A_ )
| 40 | 1 |
import math


class Graph:
    """All-pairs shortest paths on a dense weighted digraph via Floyd-Warshall."""

    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        # adjacency matrix for weight
        self.w = [[math.inf for j in range(0, n)] for i in range(0, n)]
        # dp[i][j] stores minimum distance from i to j
        self.dp = [[math.inf for j in range(0, n)] for i in range(0, n)]
        for i in range(0, n):
            # distance from a node to itself is always zero
            self.dp[i][i] = 0

    def add_edge(self, u, v, w):
        """Add a directed edge u -> v with weight w."""
        self.dp[u][v] = w

    def floyd_warshall(self):
        """Relax every (i, j) pair through every intermediate node k: O(n^3)."""
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        """Return the minimum distance from u to v (math.inf if unreachable)."""
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
| 350 |
from maths.prime_factors import prime_factors


def __snake_case(number: int) -> int:
    """Liouville lambda: -1 if `number` has an odd count of prime factors
    (counted with multiplicity), +1 if the count is even.

    Raises:
        TypeError: if `number` is not an int.
        ValueError: if `number` < 1.
    """
    if not isinstance(number, int):
        msg = f'Input value of [number={number}] must be an integer'
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
a__ = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
a__ = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
a__ = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase_ ( self : int) -> Optional[int]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/krishnap25/mauve""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence"""),
"""references""": datasets.Value("""string""" , id="""sequence"""),
}) , codebase_urls=["""https://github.com/krishnap25/mauve"""] , reference_urls=[
"""https://arxiv.org/abs/2102.01454""",
"""https://github.com/krishnap25/mauve""",
] , )
def UpperCamelCase_ ( self : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : int=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[int]="auto" , lowerCAmelCase : str=-1 , lowerCAmelCase : Union[str, Any]=0.9 , lowerCAmelCase : Union[str, Any]=5 , lowerCAmelCase : Optional[int]=500 , lowerCAmelCase : Optional[int]="gpt2-large" , lowerCAmelCase : Any=-1 , lowerCAmelCase : Any=1024 , lowerCAmelCase : Union[str, Any]=25 , lowerCAmelCase : Union[str, Any]=5 , lowerCAmelCase : List[Any]=True , lowerCAmelCase : Dict=25 , ) -> Tuple:
"""simple docstring"""
_snake_case : str = compute_mauve(
p_text=_SCREAMING_SNAKE_CASE , q_text=_SCREAMING_SNAKE_CASE , p_features=_SCREAMING_SNAKE_CASE , q_features=_SCREAMING_SNAKE_CASE , p_tokens=_SCREAMING_SNAKE_CASE , q_tokens=_SCREAMING_SNAKE_CASE , num_buckets=_SCREAMING_SNAKE_CASE , pca_max_data=_SCREAMING_SNAKE_CASE , kmeans_explained_var=_SCREAMING_SNAKE_CASE , kmeans_num_redo=_SCREAMING_SNAKE_CASE , kmeans_max_iter=_SCREAMING_SNAKE_CASE , featurize_model_name=_SCREAMING_SNAKE_CASE , device_id=_SCREAMING_SNAKE_CASE , max_text_length=_SCREAMING_SNAKE_CASE , divergence_curve_discretization_size=_SCREAMING_SNAKE_CASE , mauve_scaling_factor=_SCREAMING_SNAKE_CASE , verbose=_SCREAMING_SNAKE_CASE , seed=_SCREAMING_SNAKE_CASE , )
return out
| 317 |
"""simple docstring"""
from __future__ import annotations
lowerCamelCase__ = list[tuple[int, int]]
lowerCamelCase__ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowerCamelCase__ = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : int = pos_x
__lowerCAmelCase : Optional[Any] = pos_y
__lowerCAmelCase : Optional[int] = (pos_y, pos_x)
__lowerCAmelCase : Union[str, Any] = goal_x
__lowerCAmelCase : Any = goal_y
__lowerCAmelCase : Optional[Any] = g_cost
__lowerCAmelCase : Any = parent
__lowerCAmelCase : Union[str, Any] = self.calculate_heuristic()
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = abs(self.pos_x - self.goal_x )
__lowerCAmelCase : str = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self , _SCREAMING_SNAKE_CASE ):
return self.f_cost < other.f_cost
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = [self.start]
__lowerCAmelCase : list[Node] = []
__lowerCAmelCase : str = False
def __lowerCamelCase ( self ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__lowerCAmelCase : Optional[int] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
__lowerCAmelCase : Union[str, Any] = True
return self.retrace_path(_SCREAMING_SNAKE_CASE )
self.closed_nodes.append(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = self.get_successors(_SCREAMING_SNAKE_CASE )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_SCREAMING_SNAKE_CASE )
else:
# retrieve the best current path
__lowerCAmelCase : Optional[Any] = self.open_nodes.pop(self.open_nodes.index(_SCREAMING_SNAKE_CASE ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_SCREAMING_SNAKE_CASE )
else:
self.open_nodes.append(_SCREAMING_SNAKE_CASE )
if not self.reached:
return [self.start.pos]
return None
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = []
for action in delta:
__lowerCAmelCase : Optional[int] = parent.pos_x + action[1]
__lowerCAmelCase : Union[str, Any] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_SCREAMING_SNAKE_CASE ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _SCREAMING_SNAKE_CASE , ) )
return successors
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = node
__lowerCAmelCase : Optional[int] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__lowerCAmelCase : int = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
lowerCamelCase__ = (0, 0)
lowerCamelCase__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("""------""")
lowerCamelCase__ = GreedyBestFirst(init, goal)
lowerCamelCase__ = greedy_bf.search()
if path:
for pos_x, pos_y in path:
lowerCamelCase__ = 2
for elem in grid:
print(elem) | 86 | 0 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module logger.
lowerCAmelCase = logging.get_logger(__name__)

# NOTE(review): all four module constants below are bound to the same name
# `lowerCAmelCase`, so each assignment overwrites the previous one, and the
# tokenizer class later references VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, which are never defined in this view.
# This looks like mechanical renaming damage — confirm against the original file.
lowerCAmelCase = {'vocab_file': 'spiece.model'}

lowerCAmelCase = {
    'vocab_file': {
        'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
    }
}

# Maximum model input sizes (positional embedding length) per checkpoint.
lowerCAmelCase = {
    'AI-Sweden/gpt-sw3-126m': 2048,
    'AI-Sweden/gpt-sw3-350m': 2048,
    'AI-Sweden/gpt-sw3-1.6b': 2048,
    'AI-Sweden/gpt-sw3-6.7b': 2048,
    'AI-Sweden/gpt-sw3-20b': 2048,
}
# GPT-SW3 sentencepiece tokenizer.
# NOTE(review): this class is heavily damaged by mechanical renaming — the base
# class `__lowercase` is undefined, several defs reuse one parameter name
# (a SyntaxError), and many call sites pass the class object `_a` where a real
# argument belongs. Comments below describe the apparent intent; code is left
# byte-identical for a separate, careful restoration.
class _a ( __lowercase ):
    # Class-level tokenizer configuration; all four fields share the name
    # `_lowercase`, so only the last binding survives — presumably these were
    # vocab_files_names / pretrained maps / max sizes / model_input_names.
    _lowercase : str = VOCAB_FILES_NAMES
    _lowercase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
    _lowercase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowercase : Optional[Any] = ["input_ids", "attention_mask"]
    # NOTE(review): duplicate parameter name `UpperCamelCase_` — SyntaxError.
    # Body references suggest the real signature was (vocab_file, do_lower_case,
    # remove_space, keep_accents, pad_token, unk_token, eos_token, bos_token,
    # sp_model_kwargs, **kwargs) — TODO confirm.
    def __init__( self: Optional[int] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Any=False , UpperCamelCase_: List[str]=False , UpperCamelCase_: Optional[Any]=False , UpperCamelCase_: List[str]=None , UpperCamelCase_: Union[str, Any]=None , UpperCamelCase_: Optional[Any]=None , UpperCamelCase_: Any=None , UpperCamelCase_: Any = None , **UpperCamelCase_: List[Any] , ) -> None:
        """simple docstring"""
        lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs
        lowercase__ = kwargs.get('''name_or_path''' )
        if name_or_path is None:
            logger.warning(
                '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'''
                ''' you are testing the model, this can safely be ignored''' )
            lowercase__ = '''None'''
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        lowercase__ = '''<|endoftext|>''' if eos_token is None else eos_token
        lowercase__ = '''<unk>''' if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            lowercase__ = unk_token if pad_token is None else pad_token
            lowercase__ = eos_token if bos_token is None else bos_token
        else:
            lowercase__ = '''<pad>''' if pad_token is None else pad_token
            lowercase__ = '''<s>''' if bos_token is None else bos_token
        # NOTE(review): `_a` here is the class itself, not the intended
        # do_lower_case/remove_space/... arguments — renaming damage.
        super().__init__(
            do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , pad_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
        lowercase__ = do_lower_case
        lowercase__ = remove_space
        lowercase__ = keep_accents
        lowercase__ = vocab_file
        lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(_a )
        # Used for whitespace normalization in input texts
        # fmt : off
        lowercase__ = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', ''''''}
        # fmt : on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        lowercase__ = re.compile(
            f'[{"".join(map(_a , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8_203] ) )}]' )
    # Pickling support: the sentencepiece processor is not picklable, so it is
    # dropped here and re-created in __setstate__.
    def __getstate__( self: Dict ) -> List[str]:
        """simple docstring"""
        lowercase__ = self.__dict__.copy()
        lowercase__ = None
        return state
    def __setstate__( self: List[str] , UpperCamelCase_: List[str] ) -> Dict:
        """simple docstring"""
        lowercase__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            lowercase__ = {}
        lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def lowerCamelCase_ ( self: str ) -> int:
        """simple docstring"""
        return len(self.sp_model )
    # Strip non-printing characters, normalize whitespace variants to a plain
    # space, then apply NFC Unicode normalization.
    def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: Optional[Any] ) -> str:
        """simple docstring"""
        lowercase__ = self.non_printing_characters_re.sub('''''' , _a )
        # Normalize whitespaces
        lowercase__ = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] )
        # NFC Unicode normalization
        lowercase__ = unicodedata.normalize('''NFC''' , _a )
        return text
    def lowerCamelCase_ ( self: Dict , UpperCamelCase_: Dict , **UpperCamelCase_: Tuple ) -> List[str]:
        """simple docstring"""
        lowercase__ = self.preprocess_text(_a )
        return self.sp_model.encode(_a , out_type=_a )
    # token -> id via the sentencepiece model.
    def lowerCamelCase_ ( self: Any , UpperCamelCase_: Dict ) -> int:
        """simple docstring"""
        return self.sp_model.PieceToId(_a )
    # id -> token via the sentencepiece model.
    def lowerCamelCase_ ( self: Any , UpperCamelCase_: int ) -> str:
        """simple docstring"""
        return self.sp_model.IdToPiece(_a )
    @staticmethod
    def lowerCamelCase_ ( UpperCamelCase_: Union[str, Any] ) -> str:
        """simple docstring"""
        return out_string
    # Rebuild a string from tokens, decoding runs of ordinary pieces with the
    # sentencepiece model and splicing special tokens back in verbatim.
    def lowerCamelCase_ ( self: str , UpperCamelCase_: List[Any] ) -> str:
        """simple docstring"""
        lowercase__ = []
        lowercase__ = ''''''
        lowercase__ = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(_a ) + token
                lowercase__ = True
                lowercase__ = []
            else:
                current_sub_tokens.append(_a )
                lowercase__ = False
        out_string += self.sp_model.decode(_a )
        return out_string
    def lowerCamelCase_ ( self: Tuple ) -> Dict[str, int]:
        """simple docstring"""
        lowercase__ = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    # Persist the sentencepiece model file (copy it, or serialize the in-memory
    # proto if the source file is gone).
    def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Any = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(_a ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        lowercase__ = os.path.join(
            _a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , _a )
        elif not os.path.isfile(self.vocab_file ):
            with open(_a , '''wb''' ) as fi:
                lowercase__ = self.sp_model.serialized_model_proto()
                fi.write(_a )
        return (out_vocab_file,)
    # Fast-path encode: preprocess + sentencepiece encode, optionally returning
    # a torch tensor when return_tensors is True or "pt".
    def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Any , UpperCamelCase_: List[Any] = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """simple docstring"""
        if isinstance(_a , _a ):
            lowercase__ = self.preprocess_text(_a )
            lowercase__ = self.sp_model.encode(_a )
        else:
            lowercase__ = [self.preprocess_text(_a ) for t in text]
            lowercase__ = self.sp_model.encode(_a )
        if return_tensors is True or return_tensors == "pt":
            lowercase__ = torch.tensor(_a )
        return token_ids
    def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: int ) -> str:
        """simple docstring"""
        return self.sp_model.decode(_a )
    # Build the chat-style prompt "<eos><bos>User: ...<bos>Bot: ...<bos>Bot:"
    # from a Conversation object and encode it.
    def lowerCamelCase_ ( self: str , UpperCamelCase_: Optional[int] ) -> List[int]:
        """simple docstring"""
        lowercase__ = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
        lowercase__ = (
            f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(_a ) + f'{self.bos_token}Bot:'
        )
        return self.encode(text=_a )
| 365 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
# Make torch/cuDNN fully deterministic so the hard-coded image slices below reproduce.
enable_full_determinism()
# Fast (CPU, tiny-model) tests for the AltDiffusion text-to-image pipeline.
# NOTE(review): mechanical renaming damage — the mixin bases are all
# `UpperCamelCase__` (undefined here), the four `_lowercase` class fields
# overwrite each other, and `get_dummy_inputs` repeats one parameter name
# (a SyntaxError; the body suggests the real parameters were `device` and
# `seed=0`). Code left byte-identical pending a careful restoration.
class _a ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
    _lowercase : int = AltDiffusionPipeline
    _lowercase : Tuple = TEXT_TO_IMAGE_PARAMS
    _lowercase : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
    _lowercase : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
    _lowercase : int = TEXT_TO_IMAGE_IMAGE_PARAMS
    # Build a dict of tiny randomly-initialized pipeline components
    # (unet / scheduler / vae / text encoder / tokenizer).
    def lowerCamelCase_ ( self: Optional[int] ) -> Union[str, Any]:
        """simple docstring"""
        torch.manual_seed(0 )
        lowercase__ = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        lowercase__ = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=UpperCamelCase_ , set_alpha_to_one=UpperCamelCase_ , )
        torch.manual_seed(0 )
        lowercase__ = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        torch.manual_seed(0 )
        lowercase__ = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_002 , )
        lowercase__ = CLIPTextModel(UpperCamelCase_ )
        lowercase__ = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
        lowercase__ = 77
        lowercase__ = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    # NOTE(review): duplicate parameter name `UpperCamelCase_` — SyntaxError;
    # the body uses it both as the device and as the RNG seed.
    def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: Optional[int] , UpperCamelCase_: str=0 ) -> Union[str, Any]:
        """simple docstring"""
        if str(UpperCamelCase_ ).startswith('''mps''' ):
            lowercase__ = torch.manual_seed(UpperCamelCase_ )
        else:
            lowercase__ = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
        lowercase__ = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs
    def lowerCamelCase_ ( self: Optional[Any] ) -> Dict:
        """simple docstring"""
        super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
    def lowerCamelCase_ ( self: Dict ) -> str:
        """simple docstring"""
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
    # End-to-end tiny-pipeline run; compares a 3x3 corner slice of the output
    # image against hard-coded reference values.
    def lowerCamelCase_ ( self: Optional[int] ) -> Any:
        """simple docstring"""
        lowercase__ = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        lowercase__ = self.get_dummy_components()
        torch.manual_seed(0 )
        lowercase__ = RobertaSeriesConfig(
            hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , )
        # TODO: remove after fixing the non-deterministic text encoder
        lowercase__ = RobertaSeriesModelWithTransformation(UpperCamelCase_ )
        lowercase__ = text_encoder
        lowercase__ = AltDiffusionPipeline(**UpperCamelCase_ )
        lowercase__ = alt_pipe.to(UpperCamelCase_ )
        alt_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
        lowercase__ = self.get_dummy_inputs(UpperCamelCase_ )
        lowercase__ = '''A photo of an astronaut'''
        lowercase__ = alt_pipe(**UpperCamelCase_ )
        lowercase__ = output.images
        lowercase__ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        lowercase__ = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    # Same as above but with a PNDM scheduler instead of DDIM.
    def lowerCamelCase_ ( self: Optional[Any] ) -> Tuple:
        """simple docstring"""
        lowercase__ = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        lowercase__ = self.get_dummy_components()
        lowercase__ = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
        torch.manual_seed(0 )
        lowercase__ = RobertaSeriesConfig(
            hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , )
        # TODO: remove after fixing the non-deterministic text encoder
        lowercase__ = RobertaSeriesModelWithTransformation(UpperCamelCase_ )
        lowercase__ = text_encoder
        lowercase__ = AltDiffusionPipeline(**UpperCamelCase_ )
        lowercase__ = alt_pipe.to(UpperCamelCase_ )
        alt_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
        lowercase__ = self.get_dummy_inputs(UpperCamelCase_ )
        lowercase__ = alt_pipe(**UpperCamelCase_ )
        lowercase__ = output.images
        lowercase__ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        lowercase__ = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# GPU integration tests against real AltDiffusion checkpoints.
# NOTE(review): all three methods share the name `lowerCamelCase_`, so the
# later definitions overwrite the earlier ones — the first was presumably
# `tearDown` (it calls super().tearDown()). Renaming damage; code kept as-is.
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
    # Free GPU memory between tests.
    def lowerCamelCase_ ( self: Dict ) -> Optional[int]:
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    # Full 512x512 generation with the default scheduler; slice comparison.
    def lowerCamelCase_ ( self: Union[str, Any] ) -> Union[str, Any]:
        """simple docstring"""
        lowercase__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=UpperCamelCase_ )
        lowercase__ = alt_pipe.to(UpperCamelCase_ )
        alt_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
        lowercase__ = '''A painting of a squirrel eating a burger'''
        lowercase__ = torch.manual_seed(0 )
        lowercase__ = alt_pipe([prompt] , generator=UpperCamelCase_ , guidance_scale=6.0 , num_inference_steps=20 , output_type='''np''' )
        lowercase__ = output.images
        lowercase__ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        lowercase__ = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    # Same checkpoint driven by an explicit DDIM scheduler, 2 steps only.
    def lowerCamelCase_ ( self: Union[str, Any] ) -> Dict:
        """simple docstring"""
        lowercase__ = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
        lowercase__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=UpperCamelCase_ , safety_checker=UpperCamelCase_ )
        lowercase__ = alt_pipe.to(UpperCamelCase_ )
        alt_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
        lowercase__ = '''A painting of a squirrel eating a burger'''
        lowercase__ = torch.manual_seed(0 )
        lowercase__ = alt_pipe([prompt] , generator=UpperCamelCase_ , num_inference_steps=2 , output_type='''numpy''' )
        lowercase__ = output.images
        lowercase__ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        lowercase__ = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 93 | 0 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
lowerCamelCase_ = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __A( __lowerCamelCase ):
    """
    Trainer subclass for quantization-aware question answering.

    Adds (a) a calibration pass that drives pytorch-quantization observers
    over a held-out dataset, (b) QA-style post-processing around
    evaluate/predict, and (c) ONNX export of the quantized model.

    NOTE(review): the mangling in this file replaced every parameter after
    `self` with `SCREAMING_SNAKE_CASE_` (several signatures declare it more
    than once, which is a SyntaxError) and every assignment target with
    `UpperCamelCase__`, while the bodies still read the original descriptive
    names (`calib_dataset`, `eval_dataset`, `metrics`, ...).  The original
    signatures and local names must be restored before this module can run.
    """
    def __init__(self, *SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_ ):
        super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
        # QA-specific extras: the raw eval examples, the span post-processor
        # and the quantization CLI arguments.
        UpperCamelCase__ = eval_examples
        UpperCamelCase__ = post_process_function
        UpperCamelCase__ = quant_trainer_args
        UpperCamelCase__ = 1_28 # default number of calibration samples
    # Build a DataLoader over the calibration dataset (eval batch size,
    # shuffled, unused columns stripped).
    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_=None ):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("""Trainer: calibration requires an calib_dataset.""" )
        UpperCamelCase__ = calib_dataset if calib_dataset is not None else self.calib_dataset
        UpperCamelCase__ = self._remove_unused_columns(SCREAMING_SNAKE_CASE_ , description="""Calibration""" )
        return DataLoader(
            SCREAMING_SNAKE_CASE_ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=SCREAMING_SNAKE_CASE_ , )
    # Run the calibration pass: enable observers, feed up to `calib_num`
    # samples through prediction_step, then freeze the collected ranges.
    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_=None ):
        UpperCamelCase__ = self.train_dataset if calib_dataset is None else calib_dataset
        UpperCamelCase__ = self.get_calib_dataloader(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = self.model
        quant_trainer.configure_model(SCREAMING_SNAKE_CASE_ , self.quant_trainer_args , calib=SCREAMING_SNAKE_CASE_ )
        model.eval()
        quant_trainer.enable_calibration(SCREAMING_SNAKE_CASE_ )
        logger.info("""***** Running calibration *****""" )
        logger.info(F" Num examples = {self.calib_num}" )
        logger.info(F" Batch size = {calib_dataloader.batch_size}" )
        for step, inputs in enumerate(SCREAMING_SNAKE_CASE_ ):
            # Prediction step
            UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.prediction_step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prediction_loss_only=SCREAMING_SNAKE_CASE_ )
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break
        quant_trainer.finish_calibration(SCREAMING_SNAKE_CASE_ , self.quant_trainer_args )
        UpperCamelCase__ = model
    # Evaluation with metric computation deferred until after the QA
    # post-processing step has rebuilt answer spans from logits.
    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_ = "eval" ):
        UpperCamelCase__ = self.eval_dataset if eval_dataset is None else eval_dataset
        UpperCamelCase__ = self.get_eval_dataloader(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        UpperCamelCase__ = self.compute_metrics
        UpperCamelCase__ = None
        UpperCamelCase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            UpperCamelCase__ = eval_loop(
                SCREAMING_SNAKE_CASE_ , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=SCREAMING_SNAKE_CASE_ , )
        finally:
            # Always restore the metric function, even if the loop raised.
            UpperCamelCase__ = compute_metrics
        if self.post_process_function is not None and self.compute_metrics is not None:
            UpperCamelCase__ = self.post_process_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , output.predictions )
            UpperCamelCase__ = self.compute_metrics(SCREAMING_SNAKE_CASE_ )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(F"{metric_key_prefix}_" ):
                    UpperCamelCase__ = metrics.pop(SCREAMING_SNAKE_CASE_ )
            self.log(SCREAMING_SNAKE_CASE_ )
        else:
            UpperCamelCase__ = {}
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        UpperCamelCase__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , SCREAMING_SNAKE_CASE_ )
        return metrics
    # Prediction counterpart of evaluate(): same deferred-metrics pattern,
    # returns a PredictionOutput built from the post-processed spans.
    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_ = "test" ):
        UpperCamelCase__ = self.get_test_dataloader(SCREAMING_SNAKE_CASE_ )
        # Temporarily disable metric computation, we will do it in the loop here.
        UpperCamelCase__ = self.compute_metrics
        UpperCamelCase__ = None
        UpperCamelCase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            UpperCamelCase__ = eval_loop(
                SCREAMING_SNAKE_CASE_ , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=SCREAMING_SNAKE_CASE_ , )
        finally:
            UpperCamelCase__ = compute_metrics
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        UpperCamelCase__ = self.post_process_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , output.predictions , """predict""" )
        UpperCamelCase__ = self.compute_metrics(SCREAMING_SNAKE_CASE_ )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(F"{metric_key_prefix}_" ):
                UpperCamelCase__ = metrics.pop(SCREAMING_SNAKE_CASE_ )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=SCREAMING_SNAKE_CASE_ )
    # Export the quantized model as ONNX (opset 13) using one real eval batch
    # as the tracing example; batch and sequence dims are marked dynamic.
    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_="./" ):
        UpperCamelCase__ = self.eval_dataset
        UpperCamelCase__ = self.get_eval_dataloader(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = next(iter(SCREAMING_SNAKE_CASE_ ) )
        # saving device - to make it consistent
        UpperCamelCase__ = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
        # convert to tuple
        UpperCamelCase__ = tuple(v.to(SCREAMING_SNAKE_CASE_ ) for k, v in batch.items() )
        logger.info("""Converting model to be onnx compatible""" )
        from pytorch_quantization.nn import TensorQuantizer
        UpperCamelCase__ = True
        UpperCamelCase__ = self.model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        model.float()
        # Unwrap DataParallel/DistributedDataParallel if necessary.
        UpperCamelCase__ = model.module if hasattr(SCREAMING_SNAKE_CASE_ , """module""" ) else model
        quant_trainer.configure_model(SCREAMING_SNAKE_CASE_ , self.quant_trainer_args )
        UpperCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE_ , """model.onnx""" )
        logger.info(F"exporting model to {output_model_file}" )
        UpperCamelCase__ = {0: """batch_size""", 1: """seq_len"""}
        torch.onnx.export(
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , export_params=SCREAMING_SNAKE_CASE_ , opset_version=13 , do_constant_folding=SCREAMING_SNAKE_CASE_ , input_names=["""input_ids""", """attention_mask""", """token_type_ids"""] , output_names=["""output_start_logits""", """output_end_logits"""] , dynamic_axes={
                """input_ids""": axes,
                """attention_mask""": axes,
                """token_type_ids""": axes,
                """output_start_logits""": axes,
                """output_end_logits""": axes,
            } , verbose=SCREAMING_SNAKE_CASE_ , )
        logger.info("""onnx export finished""" )
| 244 |
import torch
from diffusers import StableDiffusionPipeline

# Inference script for a fine-tuned (e.g. DreamBooth) Stable Diffusion model.
# Fixes vs. the previous revision:
#   * every result was bound to the reused name `lowerCamelCase_` while later
#     statements read `pipe` / `image`, which were never defined (NameError);
#   * `torch.floataa` is not a real dtype — `torch.float16` was intended.

model_id = '''path-to-your-trained-model'''
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to('''cuda''')

prompt = '''A photo of sks dog in a bucket'''
# 50 denoising steps with classifier-free guidance scale 7.5; keep the first image.
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('''dog-bucket.png''')
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
__UpperCamelCase = True
except (ImportError, AttributeError):
__UpperCamelCase = object
def _a ( *_lowerCamelCase , **_lowerCamelCase ) -> Optional[Any]:
    """No-op stand-in used when the FastAPI serving dependencies are absent."""
    pass

# Records that the serving extras could not be imported; checked in __init__.
__UpperCamelCase = False
__UpperCamelCase = logging.get_logger("transformers-cli/serving")
def _a ( _lowerCamelCase ) -> List[str]:
    """
    Factory: build the pipeline described by the parsed CLI arguments and wrap
    it in a ServeCommand.

    NOTE(review): the parameter is named `_lowerCamelCase` but the body reads
    `args`, and the freshly built pipeline is bound to `__snake_case` while
    the return statement reads the undefined `SCREAMING_SNAKE_CASE_` — both
    raise NameError as written; the original names need to be restored.
    """
    __snake_case : int = pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    return ServeCommand(SCREAMING_SNAKE_CASE_ , args.host , args.port , args.workers )
class _A ( snake_case__ ):
    # Response model for GET "/": exposes the loaded model's configuration.
    # NOTE(review): the four response models in this file all share the
    # mangled name `_A`, so each shadows the previous one at module level.
    lowercase__: dict
class _A ( snake_case__ ):
    # Response model for "/tokenize": the tokens, plus their ids on request.
    # NOTE(review): both fields share the name `lowercase__`, so the second
    # annotation overwrites the first; the original field names were lost.
    lowercase__: List[str]
    lowercase__: Optional[List[int]]
class _A ( snake_case__ ):
    # Response model for "/detokenize": the reconstructed text.
    lowercase__: str
class _A ( snake_case__ ):
    # Response model for "/forward": the raw pipeline output.
    lowercase__: Any
class _A ( snake_case__ ):
    """
    `transformers-cli serve`: expose a Pipeline over a small FastAPI app with
    /, /tokenize, /detokenize and /forward routes.

    NOTE(review): the mangling renamed every method to `lowercase__`, so each
    definition shadows the previous one and only the last survives; bodies
    also read names (`parser`, `serve_parser`, `pipeline`, `host`, ...) that
    differ from their declared parameters, and `_A` appears both as this
    class's name and as a placeholder argument in many calls.  The original
    identifiers must be restored before this command can run.
    """
    @staticmethod
    def lowercase__ ( __magic_name__ : Any ) -> int:
        """Register the `serve` subcommand and its CLI options."""
        __snake_case : List[str] = parser.add_parser(
            """serve""" , help="""CLI tool to run inference requests through REST and GraphQL endpoints.""" )
        serve_parser.add_argument(
            """--task""" , type=_A , choices=get_supported_tasks() , help="""The task to run the pipeline on""" , )
        serve_parser.add_argument("""--host""" , type=_A , default="""localhost""" , help="""Interface the server will listen on.""" )
        serve_parser.add_argument("""--port""" , type=_A , default=88_88 , help="""Port the serving will listen to.""" )
        serve_parser.add_argument("""--workers""" , type=_A , default=1 , help="""Number of http workers""" )
        serve_parser.add_argument("""--model""" , type=_A , help="""Model's name or path to stored model.""" )
        serve_parser.add_argument("""--config""" , type=_A , help="""Model's config name or path to stored model.""" )
        serve_parser.add_argument("""--tokenizer""" , type=_A , help="""Tokenizer name to use.""" )
        serve_parser.add_argument(
            """--device""" , type=_A , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , )
        serve_parser.set_defaults(func=_A )
    def __init__( self : int , __magic_name__ : Optional[Any] , __magic_name__ : Tuple , __magic_name__ : int , __magic_name__ : List[str] ) -> Optional[Any]:
        """Store the pipeline and bind the FastAPI routes (10-minute timeout)."""
        __snake_case : Dict = pipeline
        __snake_case : Optional[int] = host
        __snake_case : str = port
        __snake_case : Union[str, Any] = workers
        # Fail fast when the optional [serving] extras were not installed.
        if not _serve_dependencies_installed:
            raise RuntimeError(
                """Using serve command requires FastAPI and uvicorn. """
                """Please install transformers with [serving]: pip install \"transformers[serving]\"."""
                """Or install FastAPI and uvicorn separately.""" )
        else:
            logger.info(f'''Serving model over {host}:{port}''' )
            __snake_case : Any = FastAPI(
                routes=[
                    APIRoute(
                        """/""" , self.model_info , response_model=_A , response_class=_A , methods=["""GET"""] , ),
                    APIRoute(
                        """/tokenize""" , self.tokenize , response_model=_A , response_class=_A , methods=["""POST"""] , ),
                    APIRoute(
                        """/detokenize""" , self.detokenize , response_model=_A , response_class=_A , methods=["""POST"""] , ),
                    APIRoute(
                        """/forward""" , self.forward , response_model=_A , response_class=_A , methods=["""POST"""] , ),
                ] , timeout=6_00 , )
    def lowercase__ ( self : Tuple ) -> Optional[int]:
        """Start the uvicorn server for the configured app."""
        run(self._app , host=self.host , port=self.port , workers=self.workers )
    def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
        """GET /: return the model configuration as a plain dict."""
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
    def lowercase__ ( self : Tuple , __magic_name__ : int = Body(_A , embed=_A ) , __magic_name__ : str = Body(_A , embed=_A ) ) -> str:
        """POST /tokenize: tokenize the input text, optionally returning ids."""
        try:
            __snake_case : List[str] = self._pipeline.tokenizer.tokenize(_A )
            if return_ids:
                __snake_case : Optional[int] = self._pipeline.tokenizer.convert_tokens_to_ids(_A )
                return ServeTokenizeResult(tokens=_A , tokens_ids=_A )
            else:
                return ServeTokenizeResult(tokens=_A )
        except Exception as e:
            # Surface tokenizer failures as HTTP 500 with the error message.
            raise HTTPException(status_code=5_00 , detail={"""model""": """""", """error""": str(_A )} )
    def lowercase__ ( self : Optional[Any] , __magic_name__ : Union[str, Any] = Body(_A , embed=_A ) , __magic_name__ : Optional[Any] = Body(_A , embed=_A ) , __magic_name__ : Optional[Any] = Body(_A , embed=_A ) , ) -> Any:
        """POST /detokenize: decode token ids back into text."""
        try:
            __snake_case : Optional[int] = self._pipeline.tokenizer.decode(_A , _A , _A )
            return ServeDeTokenizeResult(model="""""" , text=_A )
        except Exception as e:
            raise HTTPException(status_code=5_00 , detail={"""model""": """""", """error""": str(_A )} )
    async def lowercase__ ( self : str , __magic_name__ : Optional[Any]=Body(_A , embed=_A ) ) -> Union[str, Any]:
        """POST /forward: run the pipeline on the inputs and return its output."""
        # Empty input short-circuits to an empty result rather than erroring.
        if len(_A ) == 0:
            return ServeForwardResult(output=[] , attention=[] )
        try:
            # Forward through the model
            __snake_case : List[Any] = self._pipeline(_A )
            return ServeForwardResult(output=_A )
        except Exception as e:
            raise HTTPException(5_00 , {"""error""": str(_A )} )
| 363 |
'''simple docstring'''
__UpperCamelCase = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def _a ( ) -> None:
    """
    Interactive entry point: prompt for a message, a key and a mode, then
    print the Vigenère-translated result.

    NOTE(review): every result below is bound to the throwaway names
    `__snake_case`, while later statements read `mode` and `_lowerCamelCase`,
    and the callees `encrypt_message`/`decrypt_message` are not defined under
    those names in this module (every function here was mangled to `_a`) —
    this function raises NameError as written.
    """
    __snake_case : Dict = input("""Enter message: """ )
    __snake_case : Optional[int] = input("""Enter key [alphanumeric]: """ )
    __snake_case : Tuple = input("""Encrypt/Decrypt [e/d]: """ )
    if mode.lower().startswith("""e""" ):
        __snake_case : Any = """encrypt"""
        __snake_case : Optional[Any] = encrypt_message(_lowerCamelCase , _lowerCamelCase )
    elif mode.lower().startswith("""d""" ):
        __snake_case : Optional[int] = """decrypt"""
        __snake_case : Any = decrypt_message(_lowerCamelCase , _lowerCamelCase )
    print(F'''\n{mode.title()}ed message:''' )
    print(_lowerCamelCase )
def _a ( message , key ) -> str:
    """
    Encrypt `message` with the Vigenère cipher under `key`.

    Fix: both parameters were previously named `_lowerCamelCase`, and
    duplicate argument names are a SyntaxError in Python.

    NOTE(review): `translate_message` is not defined under that name in this
    module (the translator below was also mangled to `_a`); restore the
    original function names for this call to resolve.
    """
    return translate_message(message , key , """encrypt""" )
def _a ( message , key ) -> str:
    """
    Decrypt `message` with the Vigenère cipher under `key`.

    Fix: both parameters were previously named `_lowerCamelCase`, and
    duplicate argument names are a SyntaxError in Python.

    NOTE(review): `translate_message` is not defined under that name in this
    module (the translator below was also mangled to `_a`); restore the
    original function names for this call to resolve.
    """
    return translate_message(message , key , """decrypt""" )
def _a ( message , key , mode ) -> str:
    """
    Vigenère-translate `message` with `key`.

    `mode` is "encrypt" or "decrypt": each letter is shifted forward or
    backward by the matching key letter.  Case is preserved, and non-letters
    pass through unchanged without consuming a key letter.

    Fixes vs. the previous revision:
      * all three parameters were named `_lowerCamelCase` — duplicate
        argument names are a SyntaxError;
      * the wrap-around took `len()` of a parameter instead of the size of
        the alphabet, and the key-index reset was lost to a throwaway local.
    """
    # Only the upper-case alphabet is needed; the stdlib constant equals the
    # module-level LETTERS table and keeps this function self-contained.
    from string import ascii_uppercase as letters

    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = letters.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += letters.find(key[key_index])
            elif mode == "decrypt":
                num -= letters.find(key[key_index])
            # Wrap within the 26-letter alphabet.
            num %= len(letters)
            if symbol.isupper():
                translated.append(letters[num])
            elif symbol.islower():
                translated.append(letters[num].lower())
            # Advance the key cyclically, only for alphabetic symbols.
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this module — the interactive
    # entry point above was mangled to `_a` — so running this file raises
    # NameError until the original function names are restored.
    main()
| 13 | 0 |
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
# Checkpoint ids for the four Stable Diffusion v1.x releases this comparison
# pipeline targets.
# NOTE(review): all four assignments reuse the single name `__UpperCAmelCase`,
# so only the last (v1-4) survives; the original distinct constant names
# (pipe1_model_id ... pipe4_model_id) were lost in the mangling.
__UpperCAmelCase ="CompVis/stable-diffusion-v1-1"
__UpperCAmelCase ="CompVis/stable-diffusion-v1-2"
__UpperCAmelCase ="CompVis/stable-diffusion-v1-3"
__UpperCAmelCase ="CompVis/stable-diffusion-v1-4"
class a__ ( UpperCAmelCase__ ):
    """
    DiffusionPipeline that runs the same prompt through the four Stable
    Diffusion v1.x checkpoints and returns their first images side by side.

    NOTE(review): this class cannot run as written —
      * every method declares the parameter name `a` multiple times, which
        is a SyntaxError (duplicate argument names);
      * `super()._init_()` is a typo for `super().__init__()`;
      * the four per-checkpoint call methods were all mangled to
        `SCREAMING_SNAKE_CASE__`, so later definitions shadow earlier ones,
        while `__call__`-equivalent code reads `self.textaimg_sda_a` and
        `self.pipea`, names assigned nowhere.
    The original distinct parameter and method names must be restored.
    """
    def __init__( self : Optional[Any] , a : AutoencoderKL , a : CLIPTextModel , a : CLIPTokenizer , a : UNetaDConditionModel , a : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , a : StableDiffusionSafetyChecker , a : CLIPImageProcessor , a : bool = True , ):
        """Load the four checkpoints plus one pipeline built from the given modules."""
        super()._init_()
        __lowerCamelCase = StableDiffusionPipeline.from_pretrained(a )
        __lowerCamelCase = StableDiffusionPipeline.from_pretrained(a )
        __lowerCamelCase = StableDiffusionPipeline.from_pretrained(a )
        __lowerCamelCase = StableDiffusionPipeline(
            vae=a , text_encoder=a , tokenizer=a , unet=a , scheduler=a , safety_checker=a , feature_extractor=a , requires_safety_checker=a , )
        self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
    @property
    def SCREAMING_SNAKE_CASE__ ( self : Dict ):
        """Return the registered sub-pipelines keyed by their config names."""
        return {k: getattr(self , a ) for k in self.config.keys() if not k.startswith('''_''' )}
    def SCREAMING_SNAKE_CASE__ ( self : List[Any] , a : Optional[Union[str, int]] = "auto" ):
        """Enable sliced attention; "auto" halves the attention head size."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            __lowerCamelCase = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(a )
    def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
        """Disable attention slicing (compute attention in one pass)."""
        self.enable_attention_slicing(a )
    @torch.no_grad()
    def SCREAMING_SNAKE_CASE__ ( self : Any , a : Union[str, List[str]] , a : int = 5_12 , a : int = 5_12 , a : int = 50 , a : float = 7.5 , a : Optional[Union[str, List[str]]] = None , a : Optional[int] = 1 , a : float = 0.0 , a : Optional[torch.Generator] = None , a : Optional[torch.FloatTensor] = None , a : Optional[str] = "pil" , a : bool = True , a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , a : int = 1 , **a : Union[str, Any] , ):
        """Generate with the Stable Diffusion v1-1 checkpoint."""
        return self.pipea(
            prompt=a , height=a , width=a , num_inference_steps=a , guidance_scale=a , negative_prompt=a , num_images_per_prompt=a , eta=a , generator=a , latents=a , output_type=a , return_dict=a , callback=a , callback_steps=a , **a , )
    @torch.no_grad()
    def SCREAMING_SNAKE_CASE__ ( self : List[str] , a : Union[str, List[str]] , a : int = 5_12 , a : int = 5_12 , a : int = 50 , a : float = 7.5 , a : Optional[Union[str, List[str]]] = None , a : Optional[int] = 1 , a : float = 0.0 , a : Optional[torch.Generator] = None , a : Optional[torch.FloatTensor] = None , a : Optional[str] = "pil" , a : bool = True , a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , a : int = 1 , **a : Tuple , ):
        """Generate with the Stable Diffusion v1-2 checkpoint."""
        return self.pipea(
            prompt=a , height=a , width=a , num_inference_steps=a , guidance_scale=a , negative_prompt=a , num_images_per_prompt=a , eta=a , generator=a , latents=a , output_type=a , return_dict=a , callback=a , callback_steps=a , **a , )
    @torch.no_grad()
    def SCREAMING_SNAKE_CASE__ ( self : List[str] , a : Union[str, List[str]] , a : int = 5_12 , a : int = 5_12 , a : int = 50 , a : float = 7.5 , a : Optional[Union[str, List[str]]] = None , a : Optional[int] = 1 , a : float = 0.0 , a : Optional[torch.Generator] = None , a : Optional[torch.FloatTensor] = None , a : Optional[str] = "pil" , a : bool = True , a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , a : int = 1 , **a : str , ):
        """Generate with the Stable Diffusion v1-3 checkpoint."""
        return self.pipea(
            prompt=a , height=a , width=a , num_inference_steps=a , guidance_scale=a , negative_prompt=a , num_images_per_prompt=a , eta=a , generator=a , latents=a , output_type=a , return_dict=a , callback=a , callback_steps=a , **a , )
    @torch.no_grad()
    def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , a : Union[str, List[str]] , a : int = 5_12 , a : int = 5_12 , a : int = 50 , a : float = 7.5 , a : Optional[Union[str, List[str]]] = None , a : Optional[int] = 1 , a : float = 0.0 , a : Optional[torch.Generator] = None , a : Optional[torch.FloatTensor] = None , a : Optional[str] = "pil" , a : bool = True , a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , a : int = 1 , **a : Tuple , ):
        """Generate with the Stable Diffusion v1-4 checkpoint."""
        return self.pipea(
            prompt=a , height=a , width=a , num_inference_steps=a , guidance_scale=a , negative_prompt=a , num_images_per_prompt=a , eta=a , generator=a , latents=a , output_type=a , return_dict=a , callback=a , callback_steps=a , **a , )
    @torch.no_grad()
    def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , a : Union[str, List[str]] , a : int = 5_12 , a : int = 5_12 , a : int = 50 , a : float = 7.5 , a : Optional[Union[str, List[str]]] = None , a : Optional[int] = 1 , a : float = 0.0 , a : Optional[torch.Generator] = None , a : Optional[torch.FloatTensor] = None , a : Optional[str] = "pil" , a : bool = True , a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , a : int = 1 , **a : str , ):
        """Run all four checkpoints on the same prompt and collect the results."""
        __lowerCamelCase = '''cuda''' if torch.cuda.is_available() else '''cpu'''
        self.to(a )
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
        # Get first result from Stable Diffusion Checkpoint v1.1
        __lowerCamelCase = self.textaimg_sda_a(
            prompt=a , height=a , width=a , num_inference_steps=a , guidance_scale=a , negative_prompt=a , num_images_per_prompt=a , eta=a , generator=a , latents=a , output_type=a , return_dict=a , callback=a , callback_steps=a , **a , )
        # Get first result from Stable Diffusion Checkpoint v1.2
        __lowerCamelCase = self.textaimg_sda_a(
            prompt=a , height=a , width=a , num_inference_steps=a , guidance_scale=a , negative_prompt=a , num_images_per_prompt=a , eta=a , generator=a , latents=a , output_type=a , return_dict=a , callback=a , callback_steps=a , **a , )
        # Get first result from Stable Diffusion Checkpoint v1.3
        __lowerCamelCase = self.textaimg_sda_a(
            prompt=a , height=a , width=a , num_inference_steps=a , guidance_scale=a , negative_prompt=a , num_images_per_prompt=a , eta=a , generator=a , latents=a , output_type=a , return_dict=a , callback=a , callback_steps=a , **a , )
        # Get first result from Stable Diffusion Checkpoint v1.4
        __lowerCamelCase = self.textaimg_sda_a(
            prompt=a , height=a , width=a , num_inference_steps=a , guidance_scale=a , negative_prompt=a , num_images_per_prompt=a , eta=a , generator=a , latents=a , output_type=a , return_dict=a , callback=a , callback_steps=a , **a , )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 67 |
# Conversion factor to joules for each supported energy unit.
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355818,
}

# Backward-compatible alias for the previous (auto-generated) binding; the
# function below reads ENERGY_CONVERSION, which used to be undefined.
_lowerCamelCase = ENERGY_CONVERSION


def _a(from_type, to_type, value):
    """
    Convert `value` from energy unit `from_type` to unit `to_type`.

    Raises:
        ValueError: if either unit name is not in ENERGY_CONVERSION.

    Fixes vs. the previous revision:
      * the three parameters were all named `lowerCamelCase` — duplicate
        argument names are a SyntaxError — while the body already read
        `from_type`/`to_type`/`value`, so those names are restored;
      * the factor table is now bound to ENERGY_CONVERSION, the name the
        body looks up (it was previously assigned to `_lowerCamelCase`).
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    # Convert to joules, then to the target unit.
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""Markov chain simulation over a weighted directed graph."""
from __future__ import annotations

from collections import Counter
from random import random


class a__:
    """
    Markov chain whose directed edges carry transition probabilities.

    Fixes vs. the previous revision (auto-mangled):
      * every method had been renamed `lowercase_`, so later definitions
        shadowed earlier ones and the in-file callers (`add_node`,
        `add_transition_probability`, `get_nodes`, `transition`) raised
        AttributeError — the names those callers use are restored;
      * `add_transition_probability` declared three parameters with the
        same name (a SyntaxError), and the adjacency map was bound to a
        throwaway local instead of `self.connections`.
    """

    def __init__(self) -> None:
        # Adjacency map: node -> {destination -> transition probability}.
        self.connections: dict[str, dict[str, float]] = {}

    def add_node(self, node: str) -> None:
        """Register `node` with an empty outgoing-edge table."""
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        """Add a directed edge node1 -> node2 with the given probability."""
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        """Return every known node."""
        return list(self.connections)

    def transition(self, node: str) -> str:
        """Sample the next node from `node`'s outgoing distribution."""
        current_probability = 0.0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        # Probabilities summed below random_value (e.g. they don't reach 1).
        return ""


def lowerCamelCase__(start, transitions, steps):
    """
    Run the chain for `steps` transitions from `start` and count visits.

    `transitions` is an iterable of (node1, node2, probability) triples.
    Returns a Counter mapping node -> visit count (every node starts at 1
    via Counter over the node list, matching the original behavior).

    Fix: the three parameters were all named `_A` — duplicate argument
    names are a SyntaxError.
    """
    graph = a__()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""Metric length conversion across SI prefixes (meter ... yottametre)."""

# Symbol for each supported full unit name.
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}

# Exponent of the factor (meter) for each symbol.
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def lowerCamelCase__(value, from_type, to_type):
    """
    Convert `value` between metric length units.

    Unit names are full words (any case, optional trailing "s"); after
    lowercasing, unresolved names are treated as symbols.  Raises
    ValueError for unknown units.

    Fixes vs. the previous revision:
      * both lookup tables had been bound to the single reused name
        `lowerCAmelCase` while the body reads `UNIT_SYMBOL` and
        `METRIC_CONVERSION` — those names are restored;
      * the three parameters were all named `_A` (a SyntaxError: duplicate
        argument names).
    """
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    # Map full names to symbols; unknown strings fall through unchanged.
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    # 10 ** (difference of prefix exponents) is the scaling factor.
    exponent = METRIC_CONVERSION[from_sanitized] - METRIC_CONVERSION[to_sanitized]
    return value * pow(10, exponent)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
lowercase = logging.get_logger(__name__)
class UpperCamelCase_ ( snake_case_ ):
    '''
    Deprecated alias kept for backward compatibility: emits a FutureWarning
    and otherwise behaves exactly like PerceiverImageProcessor.
    '''
    def __init__( self , *args , **kwargs ) -> None:
        """
        Warn that this class is deprecated, then defer to the parent
        image-processor constructor.

        Fixes vs. the previous revision:
          * the signature `(*a, **a)` reused the name `a` for both the
            var-positional and var-keyword parameters — a SyntaxError;
          * that same `a` (a tuple) was then passed to `warnings.warn` as
            the warning *category*, a TypeError — `FutureWarning` is the
            category this codebase uses for deprecations.
        """
        warnings.warn(
            'The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PerceiverImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 178 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
lowercase = logging.getLogger(__name__)
@dataclass
class UpperCamelCase_ ( snake_case_ ):
    '''
    Seq2seq-specific extension of TrainingArguments: label smoothing,
    sortish sampling, generative-metric evaluation, Adafactor, dropout
    overrides and learning-rate scheduler selection.

    NOTE(review): every field below is assigned to the same reused name
    `lowerCAmelCase` and carries no type annotation, so under @dataclass
    none of them become real fields and each assignment overwrites the
    previous one; the default values are also the undefined `snake_case_`.
    The original annotated field names must be restored.
    '''
    # Label smoothing epsilon for the seq2seq loss.
    lowerCAmelCase = field(
        default=0.0 , metadata={'''help''': '''The label smoothing epsilon to apply (if not zero).'''} )
    # Whether to use the sortish sampler for batching.
    lowerCAmelCase = field(default=snake_case_ , metadata={'''help''': '''Whether to SortishSamler or not.'''} )
    # Whether eval runs generate() so ROUGE/BLEU can be computed.
    lowerCAmelCase = field(
        default=snake_case_ , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
    lowerCAmelCase = field(default=snake_case_ , metadata={'''help''': '''whether to use adafactor'''} )
    # Per-component dropout overrides copied into model.config when set.
    lowerCAmelCase = field(
        default=snake_case_ , metadata={'''help''': '''Encoder layer dropout probability. Goes into model.config.'''} )
    lowerCAmelCase = field(
        default=snake_case_ , metadata={'''help''': '''Decoder layer dropout probability. Goes into model.config.'''} )
    lowerCAmelCase = field(default=snake_case_ , metadata={'''help''': '''Dropout probability. Goes into model.config.'''} )
    lowerCAmelCase = field(
        default=snake_case_ , metadata={'''help''': '''Attention dropout probability. Goes into model.config.'''} )
    # Learning-rate scheduler choice, restricted to arg_to_scheduler keys.
    lowerCAmelCase = field(
        default='''linear''' , metadata={'''help''': F'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'''} , )
| 178 | 1 |
def a_(__lowercase: list) -> list:
    """
    Sort `__lowercase` in place with selection sort and return it.

    Fixes vs. the previous revision:
      * the swap assigned both tuple elements to the throwaway local
        `_snake_case`, so the list was never reordered — the classic
        element swap is restored;
      * the __main__ block called the undefined name `selection_sort`
        and read locals that were never bound.
    """
    length = len(__lowercase)
    for i in range(length - 1):
        # Index of the smallest remaining element in __lowercase[i:].
        least = i
        for k in range(i + 1, length):
            if __lowercase[k] < __lowercase[least]:
                least = k
        if least != i:
            __lowercase[i], __lowercase[least] = __lowercase[least], __lowercase[i]
    return __lowercase


if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(a_(unsorted))
def a_(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """
    Solve the 2x2 linear system

        a1*x + b1*y = c1
        a2*x + b2*y = c2

    with Cramer's rule and return (x, y).

    Each equation is a three-element list [a, b, c].

    Raises:
        ValueError: for malformed input, when all four x/y coefficients are
            zero, for a system with infinitely many solutions, or for an
            inconsistent system.

    Fix: both parameters were previously named `__lowercase` (duplicate
    argument names are a SyntaxError) and the body consequently unpacked
    the same `equationa` twice; the two distinct equations are restored.
    """
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError('Please enter a valid equation.')
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('Both a & b of two equations can\'t be zero.')
    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Decide solvability by Cramer's rule.
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('Infinite solutions. (Consistent system)')
        raise ValueError('No solution. (Inconsistent system)')
    if determinant_x == determinant_y == 0:
        # Trivial solution: both right-hand sides are zero.
        return (0.0, 0.0)
    x = determinant_x / determinant
    y = determinant_y / determinant
    # Non-Trivial Solution (Consistent system)
    return (x, y)
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_snake_case = logging.get_logger('transformers.models.speecht5')
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
    '''
    Copy weight-norm parameters from an original HiFi-GAN checkpoint dict
    into a SpeechT5 HiFi-GAN model, iterating the upsample and resblock
    layout described by the config, then strip weight norm again.

    NOTE(review): all three parameters share the mangled name
    `UpperCamelCase__`, which is a SyntaxError (duplicate argument names),
    while the body reads `checkpoint`, `hf_model` and `config`; every
    extracted tensor is also bound to the throwaway local `_a` instead of
    the target `hf_model.<submodule>` attribute.  The original
    (checkpoint, hf_model, config) signature and the assignment targets
    must be restored from upstream before this converter can work.
    '''
    hf_model.apply_weight_norm()
    # Input convolution.
    _a : Dict = checkpoint["""input_conv.weight_g"""]
    _a : Tuple = checkpoint["""input_conv.weight_v"""]
    _a : Optional[int] = checkpoint["""input_conv.bias"""]
    # One transposed-conv upsampler per upsample rate.
    for i in range(len(config.upsample_rates ) ):
        _a : Any = checkpoint[F"""upsamples.{i}.1.weight_g"""]
        _a : Dict = checkpoint[F"""upsamples.{i}.1.weight_v"""]
        _a : Dict = checkpoint[F"""upsamples.{i}.1.bias"""]
    # Residual blocks: upsample-rates x kernel-sizes blocks, each with one
    # conv per dilation in both convs1 and convs2.
    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            _a : str = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_g"""]
            _a : Optional[Any] = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_v"""]
            _a : Optional[Any] = checkpoint[F"""blocks.{i}.convs1.{j}.1.bias"""]
            _a : Optional[int] = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_g"""]
            _a : str = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_v"""]
            _a : Union[str, Any] = checkpoint[F"""blocks.{i}.convs2.{j}.1.bias"""]
    # Output convolution.
    _a : str = checkpoint["""output_conv.1.weight_g"""]
    _a : Tuple = checkpoint["""output_conv.1.weight_v"""]
    _a : Dict = checkpoint["""output_conv.1.bias"""]
    hf_model.remove_weight_norm()
@torch.no_grad()
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , ):
    '''
    Convert an original HiFi-GAN checkpoint to a SpeechT5 HiFi-GAN model:
    build the config, load the generator weights, attach the mean/scale
    normalization stats, save, and optionally push to the Hub.

    NOTE(review): the five parameters all share the mangled name
    `UpperCamelCase__` (a SyntaxError: duplicate argument names) while the
    body reads `config_path`, `repo_id` etc., results are bound to the
    throwaway `_a`, and the placeholder `A_` stands where the real
    arguments (checkpoint_path, stats_path, dump folder, tensors) once
    were.  The original signature and argument wiring must be restored
    from upstream before this script can run.
    '''
    if config_path is not None:
        _a : Any = SpeechTaHifiGanConfig.from_pretrained(A_ )
    else:
        _a : int = SpeechTaHifiGanConfig()
    _a : int = SpeechTaHifiGan(A_ )
    # Original checkpoints store the generator under model["generator"].
    _a : List[str] = torch.load(A_ )
    load_weights(orig_checkpoint["""model"""]["""generator"""] , A_ , A_ )
    # stats.npy holds the per-feature mean (row 0) and scale (row 1).
    _a : List[str] = np.load(A_ )
    _a : Any = stats[0].reshape(-1 )
    _a : int = stats[1].reshape(-1 )
    _a : str = torch.from_numpy(A_ ).float()
    _a : Optional[Any] = torch.from_numpy(A_ ).float()
    model.save_pretrained(A_ )
    if repo_id:
        print("""Pushing to the hub...""" )
        model.push_to_hub(A_ )
if __name__ == "__main__":
    # CLI: original checkpoint + stats file in, HF-format model directory out,
    # with optional config override and Hub upload.
    _snake_case = argparse.ArgumentParser()
    parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
    parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
    )
    # NOTE(review): the parser is bound to `_snake_case` but every use reads
    # `parser`/`args`, and `convert_hifigan_checkpoint` is not defined under
    # that name above (it was mangled to `lowerCAmelCase__`) — this block
    # raises NameError as written.
    _snake_case = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
| 294 |
def a__ ( A_ ):
    """Return True if *A_* is an arithmetic series (constant difference).

    Raises:
        ValueError: if *A_* is not a list or is empty.

    NOTE(review): a second function named ``a__`` is defined right below and
    shadows this one at module level.
    """
    # Fixed: the original tested isinstance(A_, A_) (always-false tautology)
    # and the loop read an unbound name `series`.
    if not isinstance(A_, list ):
        raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
    if len(A_ ) == 0:
        raise ValueError("""Input list must be a non empty list""" )
    if len(A_ ) == 1:
        # A single element is trivially arithmetic.
        return True
    common_diff = A_[1] - A_[0]
    for index in range(len(A_ ) - 1 ):
        if A_[index + 1] - A_[index] != common_diff:
            return False
    return True
def a__ ( A_ ):
    """Return the arithmetic mean of the numbers in list *A_*.

    Raises:
        ValueError: if *A_* is not a list or is empty.
    """
    # Fixed: the original tested isinstance(A_, A_) and iterated over an
    # unbound name `series`.
    if not isinstance(A_, list ):
        raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
    if len(A_ ) == 0:
        raise ValueError("""Input list must be a non empty list""" )
    answer = 0
    for val in A_:
        answer += val
    # True division keeps the float result even for int inputs.
    return answer / len(A_ )
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 88 | 0 |
from collections.abc import Callable
import numpy as np
def __SCREAMING_SNAKE_CASE ( ode_func : Callable , ya : float , xa : float , step_size : float , x_end : float ) -> np.ndarray:
    """Solve an ODE y' = ode_func(x, y) with the explicit Euler-modified
    (Heun / trapezoidal predictor-corrector) method.

    Args:
        ode_func: right-hand side f(x, y).
        ya: initial value y(xa).
        xa: initial abscissa.
        step_size: integration step h.
        x_end: final abscissa.

    Returns:
        Array of the n+1 computed y values, y[0] == ya.
    """
    # Fixed: the original repeated one garbled parameter name five times
    # (SyntaxError) and referenced unbound locals `n`, `y`, `x`, `k`.
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        # Euler predictor, then trapezoidal corrector.
        y_predict = y[k] + step_size * ode_func(x , y[k] )
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_predict ))
        )
        x += step_size
    return y
| 204 | from math import isqrt
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> list[int]:
    """Sieve of Eratosthenes: return all primes strictly below *__UpperCamelCase*."""
    # Fixed: the sieve array was bound to a garbled name while the body read
    # `is_prime`, and the inner range used the limit instead of step `i`.
    is_prime = [True] * __UpperCamelCase
    for i in range(2 , isqrt(__UpperCamelCase - 1 ) + 1 ):
        if is_prime[i]:
            # Start crossing out at i*i; smaller multiples were already hit.
            for j in range(i**2 , __UpperCamelCase , i ):
                is_prime[j] = False
    return [i for i in range(2 , __UpperCamelCase ) if is_prime[i]]
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : int = 10**8 ) -> int:
    """Project Euler 187: count composites below the limit with exactly two
    (not necessarily distinct) prime factors, via a two-pointer scan.

    NOTE(review): `calculate_prime_numbers` is not defined under that name in
    this file — the sieve above is (garbled-)named `__SCREAMING_SNAKE_CASE`,
    which this definition shadows; the call cannot self-reference.
    """
    # Fixed: every local was bound to a garbled name while the body read
    # `prime_numbers`, `semiprimes_count`, `left`, `right`.
    prime_numbers = calculate_prime_numbers(__UpperCamelCase // 2 )
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left <= right:
        # Shrink `right` until p[left] * p[right] fits under the limit; every
        # pair (left, k) with left <= k <= right then yields a valid semiprime.
        while prime_numbers[left] * prime_numbers[right] >= __UpperCamelCase:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined in this file — the function above
    # is (garbled-)named `__SCREAMING_SNAKE_CASE` — so running this as a script
    # raises NameError.
    print(F"""{solution() = }""")
| 204 | 1 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
    '''Return the number of trainable (requires_grad) parameters of a model.

    *SCREAMING_SNAKE_CASE* is any object exposing ``.parameters()`` yielding
    tensors with ``requires_grad`` and ``size()``.
    '''
    # Fixed: the filter read the undefined names `p` / `model` and the sum was
    # bound to a garbled name while `params` was returned unbound.
    model_parameters = filter(lambda p: p.requires_grad , SCREAMING_SNAKE_CASE.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
# Module-level logger for this callbacks module.
__lowercase = logging.getLogger(__name__)
def lowerCamelCase ( output_dir , metric ):
    '''Build a ModelCheckpoint callback that keeps the top-3 checkpoints by
    ``val_<metric>`` (supported metrics: rouge2, bleu, em).

    Fixed: the original repeated one garbled parameter name (SyntaxError) and
    the body read the unbound names `metric` / `exp`.
    '''
    if metric == "rouge2":
        exp = '''{val_avg_rouge2:.4f}-{step_count}'''
    elif metric == "bleu":
        exp = '''{val_avg_bleu:.4f}-{step_count}'''
    elif metric == "em":
        exp = '''{val_avg_em:.4f}-{step_count}'''
    else:
        raise NotImplementedError(
            f"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
            ''' function.''' )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=f"""val_{metric}""" , mode='''max''' , save_top_k=3 , every_n_epochs=1 , )
    return checkpoint_callback
def lowerCamelCase ( metric , patience ):
    '''Build an EarlyStopping callback on ``val_<metric>`` — minimized for loss
    metrics, maximized otherwise.

    Fixed: the original repeated one garbled parameter name (SyntaxError).
    NOTE(review): verbose=True mirrors the upstream callback — confirm.
    '''
    return EarlyStopping(
        monitor=f"""val_{metric}""" , mode='''min''' if '''loss''' in metric else '''max''' , patience=patience , verbose=True , )
class lowerCamelCase_ ( pl.Callback ):
    '''Seq2seq training callback: logs learning rates and parameter counts,
    and writes per-step metric / generation files.

    NOTE(review): all four hook methods below share the garbled name
    ``UpperCamelCase__`` (only the last binding survives), each repeats the
    parameter name ``__lowercase`` (duplicate arguments are a SyntaxError),
    and the bodies read names (`pl_module`, `trainer`, `metrics`, `od`, ...)
    that are never bound — the original parameter names were presumably
    ``trainer`` / ``pl_module`` etc.; verify against the upstream callbacks.
    '''
    def UpperCamelCase__ ( self , __lowercase , __lowercase) -> Optional[Any]:
        """Log each optimizer param-group's learning rate."""
        __UpperCamelCase :List[str] = {f"""lr_group_{i}""": param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(__lowercase)
    @rank_zero_only
    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase=True) -> None:
        """Write the current callback metrics (and optionally generations) to
        per-``type_path`` text files under the output dir."""
        logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""")
        __UpperCamelCase :Dict = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']})
        # Log results
        __UpperCamelCase :Tuple = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            __UpperCamelCase :str = od / '''test_results.txt'''
            __UpperCamelCase :Optional[Any] = od / '''test_generations.txt'''
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            __UpperCamelCase :List[Any] = od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
            __UpperCamelCase :Optional[int] = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
        results_file.parent.mkdir(exist_ok=__lowercase)
        generations_file.parent.mkdir(exist_ok=__lowercase)
        with open(__lowercase , '''a+''') as writer:
            for key in sorted(__lowercase):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                __UpperCamelCase :Union[str, Any] = metrics[key]
                # Tensors are unwrapped to plain Python scalars before writing.
                if isinstance(__lowercase , torch.Tensor):
                    __UpperCamelCase :List[Any] = val.item()
                __UpperCamelCase :Optional[Any] = f"""{key}: {val:.6f}\n"""
                writer.write(__lowercase)
        if not save_generations:
            return
        if "preds" in metrics:
            __UpperCamelCase :Union[str, Any] = '''\n'''.join(metrics['''preds'''])
            generations_file.open('''w+''').write(__lowercase)
    @rank_zero_only
    def UpperCamelCase__ ( self , __lowercase , __lowercase) -> List[Any]:
        """Log total and trainable parameter counts (in millions) at train start."""
        try:
            __UpperCamelCase :int = pl_module.model.model.num_parameters()
        except AttributeError:
            __UpperCamelCase :Tuple = pl_module.model.num_parameters()
        __UpperCamelCase :Optional[Any] = count_trainable_parameters(__lowercase)
        # mp stands for million parameters
        trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1E6, '''grad_mp''': n_trainable_pars / 1E6})
    @rank_zero_only
    def UpperCamelCase__ ( self , __lowercase , __lowercase) -> Optional[int]:
        """Persist metrics to JSON and write the test logs."""
        save_json(pl_module.metrics , pl_module.metrics_save_path)
        return self._write_logs(__lowercase , __lowercase , '''test''')
    @rank_zero_only
    def UpperCamelCase__ ( self , __lowercase , __lowercase) -> Tuple:
        """Persist metrics to JSON at the end of validation."""
        save_json(pl_module.metrics , pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 43 |
import doctest
from collections import deque
import numpy as np
class snake_case__ :
    """Circular convolution of two fixed demo signals via the circulant-matrix
    method.

    Fixed: ``__init__`` bound the signals to garbled throwaway names while the
    method read ``self.first_signal`` / ``self.second_signal``, and every local
    in the method was unbound.
    """

    def __init__( self ) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def __magic_name__ ( self ) -> list[float]:
        """Return the circular convolution of the two signals, rounded to 2 dp."""
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )
        max_length = max(length_first_signal , length_second_signal )
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length )]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        # Row i of the circulant matrix is the second signal rotated right by i.
        for i in range(max_length ):
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )
        # rounding-off to two decimal places
        return [round(i , 2 ) for i in final_signal]
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    doctest.testmod()
| 342 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module-level logger. NOTE(review): `Union` is not imported in this sample,
# so evaluating this module-level annotation raises NameError at import time.
__A : Union[str, Any] = logging.get_logger(__name__)
# Map of pretrained BiT checkpoint ids to their hosted config files.
# NOTE(review): both module constants share the garbled name `__A`; this dict
# overwrites the logger binding above.
__A : int = {
    '''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class lowerCamelCase ( BackboneConfigMixin , PretrainedConfig ):
    """Configuration for a BiT (Big Transfer) backbone model.

    Fixed: the original listed the same undefined base class twice, garbled the
    three class attributes that ``__init__`` reads back via ``self.layer_types``
    / ``self.supported_padding``, and repeated one parameter name fourteen
    times in ``__init__`` (a SyntaxError). Parameter names restored from the
    keys the body assigns.
    """

    model_type = 'bit'
    layer_types = ['preactivation', 'bottleneck']
    supported_padding = ['SAME', 'VALID']

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],  # mutable default mirrors upstream config style
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types )}' )
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                # Normalize e.g. "same" -> "SAME".
                global_padding = global_padding.upper()
            else:
                raise ValueError(f'Padding strategy {global_padding} not supported' )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        # One named stage per depth entry, after the stem.
        self.stage_names = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
| 369 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCamelCase ( unittest.TestCase ):
    """Slow integration test for the Flax Stable Diffusion 2 inpainting
    pipeline.

    NOTE(review): both methods share the garbled name ``a_`` (the second
    binding shadows the first), and several locals are bound to garbled names
    while later lines read `pipeline`, `params`, `prompt_ids`, `images`,
    `output_slice` etc., which are never bound.
    """
    def a_ ( self ):
        """Free memory between tests (garbled tearDown)."""
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def a_ ( self ):
        """Run sharded inpainting on a fixed prompt/image/mask and compare a
        pixel slice against reference values."""
        UpperCamelCase : Tuple = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-inpaint/init_image.png""" )
        UpperCamelCase : int = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
        UpperCamelCase : Dict = """xvjiarui/stable-diffusion-2-inpainting"""
        UpperCamelCase , UpperCamelCase : List[str] = FlaxStableDiffusionInpaintPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase : Optional[int] = """Face of a yellow cat, high resolution, sitting on a park bench"""
        UpperCamelCase : List[str] = jax.random.PRNGKey(0 )
        UpperCamelCase : Tuple = 50
        UpperCamelCase : Dict = jax.device_count()
        UpperCamelCase : Optional[int] = num_samples * [prompt]
        UpperCamelCase : int = num_samples * [init_image]
        UpperCamelCase : List[Any] = num_samples * [mask_image]
        UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[Any] = pipeline.prepare_inputs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        # shard inputs and rng
        UpperCamelCase : Optional[int] = replicate(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase : Optional[int] = jax.random.split(SCREAMING_SNAKE_CASE_ , jax.device_count() )
        UpperCamelCase : str = shard(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase : Union[str, Any] = shard(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase : int = shard(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase : Optional[Any] = pipeline(
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , jit=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase : List[str] = output.images.reshape(SCREAMING_SNAKE_CASE_ , 512 , 512 , 3 )
        UpperCamelCase : List[Any] = images[0, 253:256, 253:256, -1]
        UpperCamelCase : List[Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        UpperCamelCase : Dict = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] )
        print(f'output_slice: {output_slice}' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 27 | 0 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
# Fixed: all four module constants were bound to one garbled name while the
# functions below read VALID_CHARS / VALID_INTS / LOWERCASE_INTS / COMMON_WORDS.
# Characters a decoded plaintext byte is allowed to be.
VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
# Candidate key bytes: the 26 lowercase letters as code points.
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
# Frequent English words used to narrow down candidate plaintexts.
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def lowercase ( ciphertext , key ) -> str | None:
    """XOR-decode *ciphertext* with the repeating 3-byte *key*; return the
    plaintext, or None as soon as a decoded byte is not a valid character.

    Fixed: the original repeated one garbled parameter name (SyntaxError) and
    XORed/appended unbound names.
    """
    decoded = ''''''
    for keychar, cipherchar in zip(cycle(key ) , ciphertext ):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar )
    return decoded
def lowercase ( ciphertext ) -> list[str]:
    """Try every 3-letter lowercase key against *ciphertext* and return all
    decodings made entirely of valid characters.

    NOTE(review): `try_key` is not bound under that name in this file (the
    decoder above is garbled to `lowercase` as well) — kept per the original.
    """
    possibles = []
    for key in product(LOWERCASE_INTS , repeat=3 ):
        encoded = try_key(ciphertext , key )
        if encoded is not None:
            possibles.append(encoded )
    return possibles
def lowercase ( possibles , common_word ) -> list[str]:
    """Keep only candidate plaintexts containing *common_word*
    (case-insensitive).

    Fixed: the original repeated one garbled parameter name (SyntaxError) and
    filtered over unbound names.
    """
    return [possible for possible in possibles if common_word in possible.lower()]
def lowercase ( filename = "p059_cipher.txt" ) -> int:
    """Project Euler 59: decode the XOR cipher file and return the sum of the
    plaintext's character codes.

    NOTE(review): `filter_valid_chars` / `filter_common_word` are not bound
    under those names in this file (all helpers are garbled to `lowercase`) —
    calls kept per the intended upstream names.
    """
    # Read the comma-separated cipher bytes next to this module.
    data = Path(__file__ ).parent.joinpath(filename ).read_text(encoding='''utf-8''' )
    ciphertext = [int(number ) for number in data.strip().split(''',''' )]
    possibles = filter_valid_chars(ciphertext )
    # Narrow the candidates word by word until a single plaintext remains.
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles , common_word )
        if len(possibles ) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char ) for char in decoded_text )
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined in this file (the entry point
    # above is garbled to `lowercase`), so this would raise NameError.
    print(F'{solution() = }')
| 102 |
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
# Fixed: this flag was bound to a garbled name while the guards below read
# `is_python_no_less_than_3_10`. (The wrong `int` annotation is dropped.)
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def _a ( default=None , metadata=None ) -> Tuple:
    """dataclasses helper: a field whose default is produced by a factory, so
    mutable defaults (lists) are safe.

    Fixed: the original repeated one garbled parameter name (SyntaxError) and
    the lambda closed over an unbound name `default`.
    """
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class __A :
    '''Fixture: four required fields (int, float, str, bool).

    NOTE(review): all fields are garbled to the same name ``__lowercase`` —
    later annotations overwrite earlier ones, leaving a single field.
    '''

    __lowercase: int
    __lowercase: float
    __lowercase: str
    __lowercase: bool
@dataclass
class __A :
    '''Fixture: fields with defaults (42 and "toto" with a help message).

    NOTE(review): both fields are garbled to the same name ``__lowercase``.
    '''

    __lowercase: int = 42
    __lowercase: str = field(default="""toto""" , metadata={"""help""": """help message"""})
@dataclass
class __A :
    '''Fixture: boolean fields with defaults (False, True, None).

    NOTE(review): all fields are garbled to the same name ``__lowercase``.
    '''

    __lowercase: bool = False
    __lowercase: bool = True
    __lowercase: Optional[bool] = None
class __A (snake_case__):
    '''Fixture enum with two string members ("titi", "toto").

    NOTE(review): both members are garbled to the same name ``__lowercase``.
    '''

    __lowercase: str = """titi"""
    __lowercase: Any = """toto"""
class __A (snake_case__):
    '''Fixture enum mixing string and int members ("titi", "toto", 42).

    NOTE(review): all members are garbled to the same name ``__lowercase``.
    '''

    __lowercase: int = """titi"""
    __lowercase: Optional[Any] = """toto"""
    __lowercase: List[Any] = 42
@dataclass
class __A :
    '''Fixture: enum-typed field defaulting to "toto", coerced to the enum in
    the (garbled) __post_init__ below.'''

    __lowercase: BasicEnum = "toto"
    def lowerCAmelCase ( self : int ) ->List[Any]:
        """Coerce the string default into a BasicEnum member."""
        snake_case_ = BasicEnum(self.foo )
@dataclass
class __A :
    '''Fixture: mixed-type-enum field defaulting to "toto", coerced in the
    (garbled) __post_init__ below.'''

    __lowercase: MixedTypeEnum = "toto"
    def lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]:
        """Coerce the string default into a MixedTypeEnum member."""
        snake_case_ = MixedTypeEnum(self.foo )
@dataclass
class __A :
    '''Fixture: optional scalar and list fields, all defaulting to None / [].

    NOTE(review): all fields are garbled to the same name ``__lowercase``.
    '''

    __lowercase: Optional[int] = None
    __lowercase: Optional[float] = field(default=snake_case__ , metadata={"""help""": """help message"""})
    __lowercase: Optional[str] = None
    __lowercase: Optional[List[str]] = list_field(default=[])
    __lowercase: Optional[List[int]] = list_field(default=[])
@dataclass
class __A :
    '''Fixture: list fields with empty and non-empty defaults.

    NOTE(review): all fields are garbled to the same name ``__lowercase``.
    '''

    __lowercase: List[int] = list_field(default=[])
    __lowercase: List[int] = list_field(default=[1, 2, 3])
    __lowercase: List[str] = list_field(default=["""Hallo""", """Bonjour""", """Hello"""])
    __lowercase: List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class __A :
    '''Fixture: required (no-default) list, str and enum fields, with a
    (garbled) __post_init__ coercing the enum.'''

    __lowercase: List[int] = field()
    __lowercase: str = field()
    __lowercase: BasicEnum = field()
    def lowerCAmelCase ( self : Any ) ->str:
        """Coerce the required enum field into a BasicEnum member."""
        snake_case_ = BasicEnum(self.required_enum )
@dataclass
class __A :
    '''Fixture: fields annotated with string (forward-reference) type names.

    NOTE(review): all fields are garbled to the same name ``__lowercase``.
    '''

    __lowercase: int
    __lowercase: "BasicEnum" = field()
    __lowercase: "Optional[bool]" = None
    __lowercase: "str" = field(default="""toto""" , metadata={"""help""": """help message"""})
    __lowercase: "List[str]" = list_field(default=["""Hallo""", """Bonjour""", """Hello"""])
# PEP 604 `X | Y` union syntax is only legal on Python 3.10+, so these two
# fixtures are defined conditionally.
if is_python_no_less_than_3_10:

    @dataclass
    class __A :
        '''3.10+ fixture: boolean fields using `bool | None` union syntax.

        NOTE(review): fields are garbled to the same name ``__lowercase``.
        '''

        __lowercase: bool = False
        __lowercase: bool = True
        __lowercase: bool | None = None
    @dataclass
    class __A :
        '''3.10+ fixture: optional scalar/list fields using `|` union syntax.

        NOTE(review): fields are garbled to the same name ``__lowercase``.
        '''

        __lowercase: int | None = None
        __lowercase: float | None = field(default=snake_case__ , metadata={"""help""": """help message"""})
        __lowercase: str | None = None
        __lowercase: list[str] | None = list_field(default=[])
        __lowercase: list[int] | None = list_field(default=[])
class __A (unittest.TestCase):
    '''Unit tests for HfArgumentParser: generated arguments, enum/Literal
    choices, list and optional fields, required fields, and dict/JSON/YAML
    parsing.

    NOTE(review): every method below is garbled to the name ``lowerCAmelCase``
    (only the last binding survives on the class), several signatures repeat
    the parameter name ``UpperCAmelCase_`` (duplicate arguments are a
    SyntaxError), and method bodies read names (`a`, `b`, `xx`, `yy`,
    `parser`, `expected`, `args`, ...) that are never bound.
    '''
    def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : argparse.ArgumentParser , UpperCAmelCase_ : argparse.ArgumentParser ) ->Optional[int]:
        """Assert that two argparse parsers define equivalent actions."""
        self.assertEqual(len(a._actions ) , len(b._actions ) )
        for x, y in zip(a._actions , b._actions ):
            snake_case_ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k != """container"""}
            snake_case_ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k != """container"""}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("""choices""" , UpperCAmelCase_ ) and yy.get("""choices""" , UpperCAmelCase_ ):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["""type"""](UpperCAmelCase_ ) , yy["""type"""](UpperCAmelCase_ ) )
                del xx["type"], yy["type"]
            self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
    def lowerCAmelCase ( self : int ) ->Any:
        """Required int/float/str fields plus an optional flag parse correctly."""
        snake_case_ = HfArgumentParser(UpperCAmelCase_ )
        snake_case_ = argparse.ArgumentParser()
        expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ )
        expected.add_argument("""--bar""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ )
        expected.add_argument("""--baz""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ )
        expected.add_argument("""--flag""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" )
        self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
        snake_case_ = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""]
        ((snake_case_) , ) = parser.parse_args_into_dataclasses(UpperCAmelCase_ , look_for_args_file=UpperCAmelCase_ )
        self.assertFalse(example.flag )
    def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]:
        """Dataclass defaults are reflected in the generated parser."""
        snake_case_ = HfArgumentParser(UpperCAmelCase_ )
        snake_case_ = argparse.ArgumentParser()
        expected.add_argument("""--foo""" , default=42 , type=UpperCAmelCase_ )
        expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" )
        self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
    def lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]:
        """Boolean fields yield --flag / --no_flag style arguments."""
        snake_case_ = argparse.ArgumentParser()
        expected.add_argument("""--foo""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" )
        expected.add_argument("""--baz""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" )
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("""--no_baz""" , action="""store_false""" , default=UpperCAmelCase_ , dest="""baz""" )
        expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ )
        snake_case_ = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(UpperCAmelCase_ )
        for dataclass_type in dataclass_types:
            snake_case_ = HfArgumentParser(UpperCAmelCase_ )
            self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
            snake_case_ = parser.parse_args([] )
            self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) )
            snake_case_ = parser.parse_args(["""--foo""", """--no_baz"""] )
            self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) )
            snake_case_ = parser.parse_args(["""--foo""", """--baz"""] )
            self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) )
            snake_case_ = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] )
            self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) )
            snake_case_ = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] )
            self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) )
    def lowerCAmelCase ( self : int ) ->List[str]:
        """Enum-typed fields become choice arguments and round-trip to members."""
        snake_case_ = HfArgumentParser(UpperCAmelCase_ )
        snake_case_ = argparse.ArgumentParser()
        expected.add_argument(
            """--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
        self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
        snake_case_ = parser.parse_args([] )
        self.assertEqual(args.foo , """toto""" )
        snake_case_ = parser.parse_args_into_dataclasses([] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
        snake_case_ = parser.parse_args(["""--foo""", """titi"""] )
        self.assertEqual(args.foo , """titi""" )
        snake_case_ = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
        snake_case_ = parser.parse_args(["""--foo""", """42"""] )
        self.assertEqual(args.foo , 42 )
        snake_case_ = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
    def lowerCAmelCase ( self : Dict ) ->str:
        """Literal-typed fields become choice arguments."""
        @dataclass
        class __A :
            '''Local fixture: Literal-typed field with a default.'''

            __lowercase: Literal["titi", "toto", 42] = "toto"
        snake_case_ = HfArgumentParser(UpperCAmelCase_ )
        snake_case_ = argparse.ArgumentParser()
        expected.add_argument(
            """--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
        self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
        snake_case_ = parser.parse_args([] )
        self.assertEqual(args.foo , """toto""" )
        snake_case_ = parser.parse_args(["""--foo""", """titi"""] )
        self.assertEqual(args.foo , """titi""" )
        snake_case_ = parser.parse_args(["""--foo""", """42"""] )
        self.assertEqual(args.foo , 42 )
    def lowerCAmelCase ( self : Optional[int] ) ->Dict:
        """List fields parse space-separated values via nargs='+'."""
        snake_case_ = HfArgumentParser(UpperCAmelCase_ )
        snake_case_ = argparse.ArgumentParser()
        expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ )
        expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=UpperCAmelCase_ )
        expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ )
        expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=UpperCAmelCase_ )
        self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
        snake_case_ = parser.parse_args([] )
        self.assertEqual(
            UpperCAmelCase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , )
        snake_case_ = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() )
        self.assertEqual(UpperCAmelCase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) )
    def lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
        """Optional fields default to None / empty lists and accept values."""
        snake_case_ = argparse.ArgumentParser()
        expected.add_argument("""--foo""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ )
        expected.add_argument("""--bar""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , help="""help message""" )
        expected.add_argument("""--baz""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ )
        expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ )
        expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ )
        snake_case_ = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(UpperCAmelCase_ )
        for dataclass_type in dataclass_types:
            snake_case_ = HfArgumentParser(UpperCAmelCase_ )
            self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
            snake_case_ = parser.parse_args([] )
            self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , bar=UpperCAmelCase_ , baz=UpperCAmelCase_ , ces=[] , des=[] ) )
            snake_case_ = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() )
            self.assertEqual(UpperCAmelCase_ , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) )
    def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]:
        """Fields without defaults become required arguments."""
        snake_case_ = HfArgumentParser(UpperCAmelCase_ )
        snake_case_ = argparse.ArgumentParser()
        expected.add_argument("""--required_list""" , nargs="""+""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ )
        expected.add_argument("""--required_str""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ )
        expected.add_argument(
            """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , )
        self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
    def lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
        """String (forward-reference) annotations are resolved like real types."""
        snake_case_ = HfArgumentParser(UpperCAmelCase_ )
        snake_case_ = argparse.ArgumentParser()
        expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ )
        expected.add_argument(
            """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , )
        expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ )
        expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" )
        expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ )
        self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
    def lowerCAmelCase ( self : Dict ) ->Tuple:
        """parse_dict builds a dataclass instance from a plain dict."""
        snake_case_ = HfArgumentParser(UpperCAmelCase_ )
        snake_case_ = {
            """foo""": 12,
            """bar""": 3.14,
            """baz""": """42""",
            """flag""": True,
        }
        snake_case_ = parser.parse_dict(UpperCAmelCase_ )[0]
        snake_case_ = BasicExample(**UpperCAmelCase_ )
        self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
    def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]:
        """parse_dict raises when extra keys are present and not allowed."""
        snake_case_ = HfArgumentParser(UpperCAmelCase_ )
        snake_case_ = {
            """foo""": 12,
            """bar""": 3.14,
            """baz""": """42""",
            """flag""": True,
            """extra""": 42,
        }
        self.assertRaises(UpperCAmelCase_ , parser.parse_dict , UpperCAmelCase_ , allow_extra_keys=UpperCAmelCase_ )
    def lowerCAmelCase ( self : Any ) ->Dict:
        """A JSON file on disk can be parsed into a dataclass instance."""
        snake_case_ = HfArgumentParser(UpperCAmelCase_ )
        snake_case_ = {
            """foo""": 12,
            """bar""": 3.14,
            """baz""": """42""",
            """flag""": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            snake_case_ = os.path.join(UpperCAmelCase_ , """temp_json""" )
            os.mkdir(UpperCAmelCase_ )
            with open(temp_local_path + """.json""" , """w+""" ) as f:
                json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
            snake_case_ = parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0]
            snake_case_ = BasicExample(**UpperCAmelCase_ )
        self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
    def lowerCAmelCase ( self : Optional[int] ) ->List[str]:
        """A YAML file on disk can be parsed into a dataclass instance."""
        snake_case_ = HfArgumentParser(UpperCAmelCase_ )
        snake_case_ = {
            """foo""": 12,
            """bar""": 3.14,
            """baz""": """42""",
            """flag""": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            snake_case_ = os.path.join(UpperCAmelCase_ , """temp_yaml""" )
            os.mkdir(UpperCAmelCase_ )
            with open(temp_local_path + """.yaml""" , """w+""" ) as f:
                yaml.dump(UpperCAmelCase_ , UpperCAmelCase_ )
            snake_case_ = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0]
            snake_case_ = BasicExample(**UpperCAmelCase_ )
        self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
    def lowerCAmelCase ( self : Dict ) ->Any:
        """HfArgumentParser can be constructed from TrainingArguments."""
        snake_case_ = HfArgumentParser(UpperCAmelCase_ )
        self.assertIsNotNone(UpperCAmelCase_ )
| 347 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class _A ( Protocol ):
    """Structural type for audio filters: anything exposing a per-sample
    processing method.

    Fixed: the original base class was the undefined name `_a`; `Protocol` is
    imported above and otherwise unused, matching the intended structural type.
    """

    def __snake_case ( self : int , sample : float):
        # Default/no-op response so the plotting helpers below can feed an
        # impulse through any conforming filter.
        return 0.0
def lowercase ( fft_results , samplerate )-> tuple[int | float, int | float]:
    '''Return (lowest, highest) display bounds for an FFT magnitude plot,
    clamped to at most -20 / at least +20, over the sub-Nyquist bins.

    Fixed: the original repeated one garbled parameter name (SyntaxError) and
    returned the unbound names `lowest` / `highest`.
    '''
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
    return lowest, highest
def lowercase ( filter_type , samplerate )-> None:
    '''Plot the gain (dB) frequency response of *filter_type* by feeding an
    impulse through it and FFT-ing the zero-padded output.

    Fixed: the original repeated one garbled parameter name (SyntaxError),
    every local was unbound, and `np.logaa` is a garble of `np.log10`.
    '''
    size = 512
    # Unit impulse of length `size`.
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs ) )
    # Magnitude to decibels: 20 * log10(|H|).
    fft_db = 20 * np.log10(fft_out )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel("Frequency (Hz)" )
    plt.xscale("log" )
    # Display within reasonable bounds
    bounds = get_bounds(fft_db , samplerate )
    plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
    plt.ylabel("Gain (dB)" )
    plt.plot(fft_db )
    plt.show()
def lowercase ( filter_type , samplerate )-> None:
    '''Plot the phase response (radians) of *filter_type* via the FFT of its
    zero-padded impulse response.

    Fixed: the original repeated one garbled parameter name (SyntaxError) and
    every local was unbound.
    '''
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phase = np.angle(np.fft.fft(outputs ) )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel("Frequency (Hz)" )
    plt.xscale("log" )
    plt.ylim(-2 * pi , 2 * pi )
    plt.ylabel("Phase shift (Radians)" )
    plt.plot(np.unwrap(phase , -2 * pi ) )
    plt.show()
| 226 |
"""simple docstring"""
import datasets
__lowercase = """\
@InProceedings{conneau2018xnli,
author = \"Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin\",
title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
booktitle = \"Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing\",
year = \"2018\",
publisher = \"Association for Computational Linguistics\",
location = \"Brussels, Belgium\",
}
"""
__lowercase = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
__lowercase = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric(\"xnli\")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
"""
def simple_accuracy(preds, labels) -> float:
    """Fraction of positions where ``preds`` equals ``labels``.

    Both arguments are array-likes of equal length (numpy arrays in practice,
    per the metric's ``format="numpy"``); the elementwise comparison's mean is
    the accuracy.

    NOTE(review): the original declared the same parameter name twice (a
    SyntaxError) while the body read ``preds``/``labels``, and the metric class
    below calls this as ``simple_accuracy`` — both are restored here.
    """
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
    """XNLI metric: plain classification accuracy over predicted labels."""

    # Metric metadata and I/O schema.  Inputs are int64 labels except for the
    # "sts-b" config, which uses float32 scores; inputs arrive as numpy arrays.
    # NOTE(review): both methods below are mangled to the same name
    # `__snake_case` (presumably `_info` and `_compute` upstream — verify),
    # so the second definition shadows the first.
    def __snake_case ( self : List[str]):
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }) , codebase_urls=[] , reference_urls=[] , format="numpy" , )

    # Compute step: delegates to the module-level accuracy helper.
    def __snake_case ( self : str , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple):
        return {"accuracy": simple_accuracy(__UpperCAmelCase , __UpperCAmelCase)}
# --- segment boundary (non-code dataset residue removed) ---
'''simple docstring'''
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the shared SentencePiece test fixture (a filesystem path string,
# so the annotation is `str`, not `int` as the mangler left it).
UpperCamelCase__: str = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class SCREAMING_SNAKE_CASE( A__ , unittest.TestCase ):
    """Tokenization test-suite for ``XLMProphetNetTokenizer`` (SentencePiece-based).

    NOTE(review): automated renaming has damaged this class, so the code is
    kept byte-identical rather than guessed at:
      * the three class attributes below all bind ``lowerCamelCase__`` (each
        assignment overwrites the previous one);
      * every test method is named ``A``, so later defs shadow earlier ones;
      * locals are bound to ``UpperCAmelCase`` while later statements read the
        original names (``tokenizer``, ``vocab_keys``, ...);
      * several call sites pass the undefined name ``__snake_case``.
    Restore the original identifiers from the upstream test module.
    """

    # Presumably tokenizer_class / test_rust_tokenizer / test_sentencepiece
    # upstream — all three collapsed onto one mangled name.  TODO confirm.
    lowerCamelCase__ = XLMProphetNetTokenizer
    lowerCamelCase__ = False
    lowerCamelCase__ = True

    def A ( self : Dict ) -> Any:
        # setUp: build a tokenizer from the SentencePiece fixture and save it
        # so the tester mixin can reload it from tmpdirname.
        super().setUp()

        # We have a SentencePiece fixture for testing
        UpperCAmelCase : Dict = XLMProphetNetTokenizer(__snake_case , keep_accents=__snake_case )
        tokenizer.save_pretrained(self.tmpdirname )

    def A ( self : Any ) -> Tuple:
        # "[PAD]" must map to id 0 and back.
        UpperCAmelCase : Any = '''[PAD]'''
        UpperCAmelCase : Optional[Any] = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) , __snake_case )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) , __snake_case )

    def A ( self : Optional[Any] ) -> Any:
        # Spot-check the vocabulary layout and its size.
        UpperCAmelCase : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''[PAD]''' )
        self.assertEqual(vocab_keys[1] , '''[CLS]''' )
        self.assertEqual(vocab_keys[-1] , '''j''' )
        self.assertEqual(len(__snake_case ) , 1012 )

    def A ( self : int ) -> Any:
        self.assertEqual(self.get_tokenizer().vocab_size , 1012 )

    def A ( self : Tuple ) -> int:
        # End-to-end: tokenize, map to ids (fairseq offset applied), and back;
        # pieces missing from the fixture vocab round-trip as "[UNK]".
        UpperCAmelCase : Optional[Any] = XLMProphetNetTokenizer(__snake_case , keep_accents=__snake_case )

        UpperCAmelCase : List[Any] = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(__snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(__snake_case ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )

        UpperCAmelCase : Optional[int] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            __snake_case , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ] , )
        UpperCAmelCase : Union[str, Any] = tokenizer.convert_tokens_to_ids(__snake_case )
        self.assertListEqual(
            __snake_case , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
            ] , )

        UpperCAmelCase : Tuple = tokenizer.convert_ids_to_tokens(__snake_case )
        self.assertListEqual(
            __snake_case , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''[UNK]''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''[UNK]''',
                '''.''',
            ] , )

    @cached_property
    def A ( self : str ) -> List[Any]:
        # Real pretrained tokenizer for the @slow integration tests below.
        return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )

    @slow
    def A ( self : Optional[Any] ) -> Union[str, Any]:
        # "Hello World!" against hard-coded expected ids (2 is the EOS id).
        UpperCAmelCase : int = '''Hello World!'''
        UpperCAmelCase : str = [35389, 6672, 49, 2]
        self.assertListEqual(__snake_case , self.big_tokenizer.encode(__snake_case ) )

    @slow
    def A ( self : Dict ) -> Union[str, Any]:
        # Frozen integration payload, checked against a pinned model revision.
        # fmt: off
        UpperCAmelCase : int = {'''input_ids''': [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__snake_case , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
# --- segment boundary (non-code dataset residue removed) ---
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to ``precision`` significant digits with the Chudnovsky series.

    Args:
        precision: number of significant digits to compute (a natural number).
    Returns:
        Pi as a string, with the final (possibly rounding-affected) digit dropped.
    Raises:
        TypeError: if ``precision`` is not an int.
        ValueError: if ``precision`` < 1.

    NOTE(review): this function was mangled — the body read ``precision`` and the
    series accumulators while only ``snake_case_`` was ever assigned, the
    isinstance check compared the argument against itself, and the
    ``getcontext().prec`` assignment was lost; the ``__main__`` block below
    calls it as ``pi``.  All restored here.
    """
    if not isinstance(precision, int):
        raise TypeError('''Undefined for non-integers''')
    elif precision < 1:
        raise ValueError('''Undefined for non-natural numbers''')

    # Decimal arithmetic carries exactly `precision` significant digits.
    getcontext().prec = precision
    # Each Chudnovsky term contributes roughly 14 correct digits.
    num_iterations = ceil(precision / 14)

    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    # Drop the last digit, which may be off due to rounding at `prec`.
    return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
    # Number of digits to display.  The mangler bound this to a different
    # name while the f-string below read `n`, causing a NameError.
    n = 50
    print(f"""The first {n} digits of pi is: {pi(n)}""")
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    """Logistic sigmoid 1 / (1 + e^-z); maps a score (scalar or array) into (0, 1).

    NOTE(review): the body read ``z`` while the parameter was mangled to a
    different name, and callers in this module use ``sigmoid_function`` —
    both restored here.
    """
    return 1 / (1 + np.exp(-z))
def cost_function(h, y):
    """Mean binary cross-entropy of predicted probabilities ``h`` against labels ``y``.

    NOTE(review): the original declared the same parameter name twice (a
    SyntaxError) while the body read ``h`` and ``y``, and the training loop
    calls this as ``cost_function`` — restored here.
    """
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
def log_likelihood(x, y, weights):
    """Bernoulli log-likelihood of labels ``y`` for features ``x`` under ``weights``.

    NOTE(review): the original declared the same parameter name three times
    (a SyntaxError) and passed a mangled placeholder into the dot product;
    the reads in the body (``scores``) fix the intended wiring.
    """
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))
def logistic_reg(alpha, x, y, max_iterations=70000):
    """Fit logistic-regression weights by batch gradient descent.

    Args:
        alpha: learning rate.
        x: (n_samples, n_features) feature matrix.
        y: (n_samples,) binary labels in {0, 1}.
        max_iterations: number of gradient steps.
    Returns:
        The fitted weight vector ``theta`` of length ``n_features``.

    NOTE(review): the original declared the same parameter name four times
    (a SyntaxError) and lost every local binding; the signature and bindings
    are restored from the ``__main__`` call site and the body's reads.
    """
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"""loss: {j} \t""")  # printing the loss after every 100 iterations
    return theta
# In[68]:
# In the mangled version the plotting code ran at module import time and all
# bindings were collapsed onto one name (with a broken tuple unpacking); the
# demo is reconstructed from the surviving reads and kept under the
# ``__main__`` guard so importing this module has no side effects.
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1  # binary task: setosa vs the rest
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("""theta: """, theta)  # printing the theta i.e our weights vector

    def predict_prob(grid_points):
        # Probability of class 1 for each row under the fitted weights.
        return sigmoid_function(np.dot(grid_points, theta))

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="""b""", label="""0""")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="""r""", label="""1""")
    (xa_min, xa_max) = (x[:, 0].min(), x[:, 0].max())
    (xb_min, xb_max) = (x[:, 1].min(), x[:, 1].max())
    (xxa, xxb) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xb_min, xb_max))
    grid = np.c_[xxa.ravel(), xxb.ravel()]
    probs = predict_prob(grid).reshape(xxa.shape)
    # Decision boundary at p = 0.5.
    plt.contour(xxa, xxb, probs, [0.5], linewidths=1, colors="""black""")
    plt.legend()
    plt.show()
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the shared SentencePiece fixture used by the test-suite below
# (name mangled; presumably SAMPLE_VOCAB upstream — verify).
_SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ):
    """Tokenization test-suite for ``XLMRobertaTokenizer`` / ``XLMRobertaTokenizerFast``.

    NOTE(review): automated renaming has damaged this class, so the code is
    kept byte-identical rather than guessed at:
      * the four class attributes below all bind ``__lowerCAmelCase`` (each
        assignment overwrites the previous one);
      * every test method is named ``lowerCamelCase_``, so later defs shadow
        earlier ones, and several calls pass ``lowerCamelCase_`` itself as an
        argument;
      * locals are bound to ``UpperCamelCase`` while later statements read the
        original names (``tokenizer``, ``vocab_keys``, ``tokenizer_r_files``, ...).
    Restore the original identifiers from the upstream test module.
    """

    # Presumably tokenizer_class / rust_tokenizer_class / test_rust_tokenizer /
    # test_sentencepiece upstream — all collapsed onto one mangled name.
    __lowerCAmelCase = XLMRobertaTokenizer
    __lowerCAmelCase = XLMRobertaTokenizerFast
    __lowerCAmelCase = True
    __lowerCAmelCase = True

    def lowerCamelCase_ ( self : Any ):
        """setUp: build a tokenizer from the SentencePiece fixture and save it for the mixin."""
        super().setUp()

        # We have a SentencePiece fixture for testing
        UpperCamelCase = XLMRobertaTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
        tokenizer.save_pretrained(self.tmpdirname )

    def lowerCamelCase_ ( self : Dict ):
        """"<pad>" must map to id 1 and back."""
        UpperCamelCase = """<pad>"""
        UpperCamelCase = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ )

    def lowerCamelCase_ ( self : Dict ):
        """Spot-check the vocabulary layout and its size."""
        UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<s>""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(vocab_keys[-1] , """<mask>""" )
        self.assertEqual(len(lowerCamelCase_ ) , 1002 )

    def lowerCamelCase_ ( self : Optional[Any] ):
        """Vocab size = 1000 SentencePiece pieces + 2 extra special tokens."""
        self.assertEqual(self.get_tokenizer().vocab_size , 1002 )

    def lowerCamelCase_ ( self : Optional[int] ):
        """End-to-end tokenize / ids / tokens round-trip against frozen expectations."""
        UpperCamelCase = XLMRobertaTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )

        UpperCamelCase = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(lowerCamelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )

        UpperCamelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            lowerCamelCase_ , [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """9""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """é""",
                """.""",
            ] , )
        UpperCamelCase = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
        self.assertListEqual(
            lowerCamelCase_ , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                #            ^ unk: 2 + 1 = 3    unk: 2 + 1 = 3                                         ^
            ] , )

        UpperCamelCase = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
        self.assertListEqual(
            lowerCamelCase_ , [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """<unk>""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """<unk>""",
                """.""",
            ] , )

    def lowerCamelCase_ ( self : int ):
        """save_pretrained round-trips between slow and fast tokenizers (3 formats)."""
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        UpperCamelCase = (self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                UpperCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
                UpperCamelCase = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )

                UpperCamelCase = tempfile.mkdtemp()

                UpperCamelCase = tokenizer_r.save_pretrained(lowerCamelCase_ )
                UpperCamelCase = tokenizer_p.save_pretrained(lowerCamelCase_ )

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
                UpperCamelCase = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
                self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )

                # Checks everything loads correctly in the same way
                UpperCamelCase = tokenizer_r.from_pretrained(lowerCamelCase_ )
                UpperCamelCase = tokenizer_p.from_pretrained(lowerCamelCase_ )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(lowerCamelCase_ )

                # Save tokenizer rust, legacy_format=True
                UpperCamelCase = tempfile.mkdtemp()

                UpperCamelCase = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
                UpperCamelCase = tokenizer_p.save_pretrained(lowerCamelCase_ )

                # Checks it save with the same files
                self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ )

                # Checks everything loads correctly in the same way
                UpperCamelCase = tokenizer_r.from_pretrained(lowerCamelCase_ )
                UpperCamelCase = tokenizer_p.from_pretrained(lowerCamelCase_ )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )

                shutil.rmtree(lowerCamelCase_ )

                # Save tokenizer rust, legacy_format=False
                UpperCamelCase = tempfile.mkdtemp()

                UpperCamelCase = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ )
                UpperCamelCase = tokenizer_p.save_pretrained(lowerCamelCase_ )

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )

                # Checks everything loads correctly in the same way
                UpperCamelCase = tokenizer_r.from_pretrained(lowerCamelCase_ )
                UpperCamelCase = tokenizer_p.from_pretrained(lowerCamelCase_ )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) )

                shutil.rmtree(lowerCamelCase_ )

    @cached_property
    def lowerCamelCase_ ( self : Any ):
        """Real pretrained tokenizer for the @slow integration tests below."""
        return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""" )

    def lowerCamelCase_ ( self : Any ):
        """Tokenizer built from a file handle must survive pickling."""
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(lowerCamelCase_ , f.name )
            UpperCamelCase = XLMRobertaTokenizer(f.name , keep_accents=lowerCamelCase_ )
            UpperCamelCase = pickle.dumps(lowerCamelCase_ )
        pickle.loads(lowerCamelCase_ )

    def lowerCamelCase_ ( self : str ):
        """Slow and fast tokenizers must agree on tokens and ids."""
        if not self.test_rust_tokenizer:
            return

        UpperCamelCase = self.get_tokenizer()
        UpperCamelCase = self.get_rust_tokenizer()

        UpperCamelCase = """I was born in 92000, and this is falsé."""

        UpperCamelCase = tokenizer.tokenize(lowerCamelCase_ )
        UpperCamelCase = rust_tokenizer.tokenize(lowerCamelCase_ )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )

        UpperCamelCase = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
        UpperCamelCase = rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )

        UpperCamelCase = self.get_rust_tokenizer()
        UpperCamelCase = tokenizer.encode(lowerCamelCase_ )
        UpperCamelCase = rust_tokenizer.encode(lowerCamelCase_ )
        self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )

    @slow
    def lowerCamelCase_ ( self : Dict ):
        """"Hello World!" against hard-coded fairseq-verified ids."""
        UpperCamelCase = """Hello World!"""
        UpperCamelCase = [0, 3_5378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(lowerCamelCase_ , self.big_tokenizer.encode(lowerCamelCase_ ) )

    @slow
    def lowerCamelCase_ ( self : str ):
        """A long, punctuation-heavy string with deliberate <unk> pieces."""
        UpperCamelCase = (
            """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
            """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
        )
        UpperCamelCase = [
            0,
            3293,
            83,
            10,
            4552,
            4989,
            7986,
            678,
            10,
            5915,
            111,
            17_9459,
            12_4850,
            4,
            6044,
            237,
            12,
            6,
            5,
            6,
            4,
            6780,
            705,
            15,
            1388,
            44,
            378,
            1_0114,
            711,
            152,
            20,
            6,
            5,
            2_2376,
            642,
            1221,
            1_5190,
            3_4153,
            450,
            5608,
            959,
            1119,
            5_7702,
            136,
            186,
            47,
            1098,
            2_9367,
            47,
            # 4426, # What fairseq tokenizes from "<unk>": "_<"
            # 3678, # What fairseq tokenizes from "<unk>": "unk"
            # 2740, # What fairseq tokenizes from "<unk>": ">"
            3,  # What we tokenize from "<unk>": "<unk>"
            6,  # Residue from the tokenization: an extra sentencepiece underline
            4,
            6044,
            237,
            6284,
            5_0901,
            528,
            31,
            90,
            34,
            927,
            2,
        ]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(lowerCamelCase_ , self.big_tokenizer.encode(lowerCamelCase_ ) )

    @slow
    def lowerCamelCase_ ( self : Union[str, Any] ):
        """Frozen integration payload, checked against a pinned model revision."""
        # fmt: off
        UpperCamelCase = {"""input_ids""": [[0, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [0, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase_ , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
# --- segment boundary (non-code dataset residue removed) ---
'''simple docstring'''
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
# T5X checkpoint flavour; read by the `--checkpoint_path` default in the
# argparse block below, which was a NameError while this was bound to a
# mangled name.
MODEL = "base_with_context"
def load_notes_encoder(weights, model):
    """Copy T5X note-encoder ``weights`` into the torch ``model`` and return it.

    NOTE(review): mangling destroyed this function: the def line declared the
    same parameter name twice (a SyntaxError) and the left-hand attribute
    targets of the assignments were all collapsed to ``snake_case_``.  The
    signature, loop bindings (``ly_weight``/``attention_weights``) and return
    are restored from the body's reads and the call in ``main``; the lost
    ``model.<...>.weight = ...`` targets are kept as dead stores below and
    must be restored from the upstream conversion script.
    """
    snake_case_ = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))  # TODO(review): target lost
    snake_case_ = nn.Parameter(
        # Position encodings are kept frozen (requires_grad was mangled; False per convention — confirm).
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )  # TODO(review): target lost
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f'layers_{lyr_num}']
        snake_case_ = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"]))  # TODO(review): target lost

        attention_weights = ly_weight["attention"]
        snake_case_ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))  # TODO(review): target lost
        snake_case_ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))  # TODO(review): target lost
        snake_case_ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))  # TODO(review): target lost
        snake_case_ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))  # TODO(review): target lost

        snake_case_ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))  # TODO(review): target lost

        snake_case_ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))  # TODO(review): target lost
        snake_case_ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))  # TODO(review): target lost
        snake_case_ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))  # TODO(review): target lost

    snake_case_ = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))  # TODO(review): target lost

    return model
def load_continuous_encoder(weights, model):
    """Copy T5X continuous-encoder ``weights`` into the torch ``model`` and return it.

    NOTE(review): same mangling damage as ``load_notes_encoder`` — duplicate
    parameter names (SyntaxError) and assignment targets collapsed to
    ``snake_case_``.  Grounded parts are restored; the lost
    ``model.<...>.weight = ...`` targets are kept as dead stores and must be
    restored from the upstream conversion script.
    """
    snake_case_ = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))  # TODO(review): target lost

    snake_case_ = nn.Parameter(
        # Position encodings are kept frozen (requires_grad was mangled; False per convention — confirm).
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )  # TODO(review): target lost

    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f'layers_{lyr_num}']
        attention_weights = ly_weight["attention"]

        snake_case_ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))  # TODO(review): target lost
        snake_case_ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))  # TODO(review): target lost
        snake_case_ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))  # TODO(review): target lost
        snake_case_ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))  # TODO(review): target lost
        snake_case_ = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"]))  # TODO(review): target lost

        snake_case_ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))  # TODO(review): target lost
        snake_case_ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))  # TODO(review): target lost
        snake_case_ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))  # TODO(review): target lost
        snake_case_ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))  # TODO(review): target lost

    snake_case_ = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))  # TODO(review): target lost

    return model
def load_decoder(weights, model):
    """Copy T5X film-decoder ``weights`` into the torch ``model`` and return it.

    NOTE(review): same mangling damage as the encoder loaders — duplicate
    parameter names (SyntaxError) and assignment targets collapsed to
    ``snake_case_``.  Grounded parts are restored; the lost
    ``model.<...>.weight = ...`` targets are kept as dead stores and must be
    restored from the upstream conversion script.
    """
    snake_case_ = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))  # TODO(review): target lost
    snake_case_ = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))  # TODO(review): target lost

    snake_case_ = nn.Parameter(
        # Position encodings are kept frozen (requires_grad was mangled; False per convention — confirm).
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )  # TODO(review): target lost

    snake_case_ = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T))  # TODO(review): target lost

    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f'layers_{lyr_num}']
        snake_case_ = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"]))  # TODO(review): target lost

        snake_case_ = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T))  # TODO(review): target lost

        attention_weights = ly_weight["self_attention"]
        snake_case_ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))  # TODO(review): target lost
        snake_case_ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))  # TODO(review): target lost
        snake_case_ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))  # TODO(review): target lost
        snake_case_ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))  # TODO(review): target lost

        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        snake_case_ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))  # TODO(review): target lost
        snake_case_ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))  # TODO(review): target lost
        snake_case_ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))  # TODO(review): target lost
        snake_case_ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))  # TODO(review): target lost

        snake_case_ = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"]))  # TODO(review): target lost

        snake_case_ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))  # TODO(review): target lost
        snake_case_ = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T))  # TODO(review): target lost

        snake_case_ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))  # TODO(review): target lost
        snake_case_ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))  # TODO(review): target lost
        snake_case_ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))  # TODO(review): target lost

    snake_case_ = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))  # TODO(review): target lost

    snake_case_ = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))  # TODO(review): target lost

    return model
def main(args):
    """Convert a music-spectrogram-diffusion T5X checkpoint into a diffusers
    ``SpectrogramDiffusionPipeline`` and optionally save it.

    NOTE(review): the def was mangled away from ``main`` (the ``__main__``
    block calls ``main(args)``) and every local binding was collapsed; the
    names below are restored from the body's reads.
    """
    ta_checkpoint = checkpoints.load_tax_checkpoint(args.checkpoint_path)
    # Materialize the jax pytree as numpy arrays.
    ta_checkpoint = jnp.tree_util.tree_map(onp.array, ta_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu", )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu", )

    decoder = TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate, )

    notes_encoder = load_notes_encoder(ta_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(ta_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan, )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
    # The mangler bound the parser and the parsed namespace to a throwaway
    # name while the `add_argument` calls and `main` read `parser`/`args`.
    parser = argparse.ArgumentParser()

    parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument(
        "--checkpoint_path",
        default=F"{MODEL}/checkpoint_500000",
        type=str,
        required=False,
        help="Path to the original jax model checkpoint.",
    )
    args = parser.parse_args()

    main(args)
# --- segment boundary (non-code dataset residue removed) ---
'''simple docstring'''
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
_SCREAMING_SNAKE_CASE : Any = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class _snake_case ( datasets.BuilderConfig ):
    """Builder config for Spark-backed datasets.

    NOTE(review): the builder class below reads this config type under the
    name ``SparkConfig`` — confirm the intended public name.
    """

    # Optional explicit feature schema; when None, features are resolved elsewhere.
    lowerCAmelCase_ : Optional[datasets.Features] = None
def UpperCamelCase_(df: "pyspark.sql.DataFrame", partition_order: List[int]):
    """Return a zero-argument generator function over ``df``.

    The returned ``generate_fn`` walks the Spark partitions in
    ``partition_order`` and yields ``("<partition_id>_<row_id>", row_dict)``
    pairs, collecting one partition at a time on the driver.

    NOTE(review): the previous signature declared two parameters with the same
    placeholder name (a SyntaxError) and the body referenced names that were
    never bound; both are restored here.
    """
    import pyspark

    def generate_fn():
        # Tag every row with the id of the Spark partition it lives in.
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            # Materialise one partition at a time to bound driver memory.
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class _snake_case ( _BaseExamplesIterable ):
    """Examples iterable backed by a Spark DataFrame.

    NOTE(review): obfuscation damage — the methods below instantiate
    ``SparkExamplesIterable`` (presumably this class's original name), two
    methods and a property all share the name ``lowerCAmelCase__`` (later
    definitions shadow earlier ones), and placeholder locals never bind the
    attributes that are later read. The docstrings describe evident intent;
    the code cannot run as written.
    """

    def __init__( self , a__ , a__=None , ) -> Any:
        """Wrap a DataFrame (and optional partition visit order) as an iterable.

        NOTE(review): the three assignments should bind ``self.df`` /
        ``self.partition_order`` / ``self.generate_examples_fn``; both
        parameters also share the placeholder name ``a__`` (a SyntaxError),
        and ``_generate_iterable_examples`` is defined above under the
        obfuscated name ``UpperCamelCase_``.
        """
        snake_case_ = df
        snake_case_ = partition_order or range(self.df.rdd.getNumPartitions() )
        snake_case_ = _generate_iterable_examples(self.df , self.partition_order )

    def __iter__( self ) -> Union[str, Any]:
        """Yield (example_id, example_dict) pairs from the generator factory."""
        yield from self.generate_examples_fn()

    def lowerCAmelCase__ ( self , a__ ) -> "SparkExamplesIterable":
        """Return a copy whose partitions are visited in shuffled order.

        NOTE(review): ``generator`` is unbound here — presumably ``a__`` is
        the RNG and the shuffled list should be passed to the new iterable.
        """
        snake_case_ = list(range(self.df.rdd.getNumPartitions() ) )
        generator.shuffle(a__ )
        return SparkExamplesIterable(self.df , partition_order=a__ )

    def lowerCAmelCase__ ( self , a__ , a__ ) -> "SparkExamplesIterable":
        """Return a copy restricted to the shards assigned to one worker.

        NOTE(review): duplicate ``a__`` parameters are a SyntaxError, and this
        def reuses the method name above.
        """
        snake_case_ = self.split_shard_indices_by_worker(a__ , a__ )
        return SparkExamplesIterable(self.df , partition_order=a__ )

    @property
    def lowerCAmelCase__ ( self ) -> int:
        """Number of shards equals the number of partitions in the visit order."""
        return len(self.partition_order )
class _snake_case ( datasets.DatasetBuilder ):
    """Dataset builder that materialises a Spark DataFrame into Arrow/Parquet shards.

    NOTE(review): throughout this class, results are bound to the placeholder
    local ``snake_case_`` while later code reads the originally intended names
    (``self.df``, ``writer``, ``stats``, ...), all methods share the name
    ``lowerCAmelCase__`` (later defs shadow earlier ones), and several
    methods declare duplicate ``a__`` parameters (SyntaxErrors). The
    commentary below documents evident intent; the code cannot run as written.
    """

    # Config class for this builder. NOTE(review): ``SparkConfig`` is not
    # defined in this file under that name — presumably the BuilderConfig
    # subclass defined above.
    lowerCAmelCase_ : Dict = SparkConfig

    def __init__( self , a__ , a__ = None , a__ = None , **a__ , ) -> str:
        """Create the builder from a DataFrame, cache dir and working dir.

        The config name is derived from the DataFrame's ``semanticHash`` so the
        cache key tracks the query plan. NOTE(review): the three local
        assignments should bind ``self._spark`` / ``self.df`` /
        ``self._working_dir``.
        """
        import pyspark
        snake_case_ = pyspark.sql.SparkSession.builder.getOrCreate()
        snake_case_ = df
        snake_case_ = working_dir
        super().__init__(
            cache_dir=a__ , config_name=str(self.df.semanticHash() ) , **a__ , )

    def lowerCAmelCase__ ( self ) -> List[Any]:
        """Verify that ``cache_dir`` is reachable from Spark workers.

        On a local master this is trivially fine; on a cluster a probe file is
        written from one worker task and must then be visible to the driver,
        i.e. cache_dir sits on shared storage such as NFS.
        """
        def create_cache_and_write_probe(a__ ):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir , exist_ok=a__ )
            # NOTE(review): ``uuid.uuida()`` is not a real API — presumably
            # ``uuid.uuid4()``; the result should be bound to ``probe_file``,
            # which is returned below.
            snake_case_ = os.path.join(self._cache_dir , "fs_test" + uuid.uuida().hex )
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(a__ , "a" )
            return [probe_file]
        if self._spark.conf.get("spark.master" , "" ).startswith("local" ):
            return
        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            # Run the probe on exactly one worker task, then check the file from the driver.
            snake_case_ = (
                self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(a__ ).collect()
            )
            if os.path.isfile(probe[0] ):
                return
        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" )

    def lowerCAmelCase__ ( self ) -> str:
        """Dataset metadata: only the (optional) user-supplied features."""
        return datasets.DatasetInfo(features=self.config.features )

    def lowerCAmelCase__ ( self , a__ ) -> Optional[Any]:
        """Expose a single TRAIN split — Spark-backed datasets are not pre-split."""
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]

    def lowerCAmelCase__ ( self , a__ ) -> Union[str, Any]:
        """Repartition ``self.df`` so each partition stays under the shard budget.

        Estimates bytes-per-row in Arrow format from a <=100-row sample, then
        repartitions when the projected total exceeds ``max_shard_size``.
        NOTE(review): the placeholder assignments should bind
        ``df_num_rows`` / ``sample_num_rows`` / ``approx_bytes_per_row`` /
        ``approx_total_size`` and finally rebind ``self.df``; ``max_shard_size``
        is the (mangled) ``a__`` parameter.
        """
        import pyspark
        def get_arrow_batch_size(a__ ):
            # Emit one single-column record batch carrying each Arrow batch's byte size.
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} )
        snake_case_ = self.df.count()
        snake_case_ = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        snake_case_ = (
            self.df.limit(a__ )
            .repartition(1 )
            .mapInArrow(a__ , "batch_bytes: long" )
            .agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) )
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        snake_case_ = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            snake_case_ = min(a__ , int(approx_total_size / max_shard_size ) )
            snake_case_ = self.df.repartition(a__ )

    def lowerCAmelCase__ ( self , a__ , a__ , a__ , ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
        """Write one Arrow/Parquet shard set per Spark task and yield per-task stats.

        Each worker task writes rolling shards (rotating the writer whenever
        ``max_shard_size`` is reached); per-task (num_examples, num_bytes,
        num_shards, shard_lengths) aggregates are then collected on the driver.
        NOTE(review): the three duplicate ``a__`` parameters appear to be
        (fpath, file_format, max_shard_size); ``shutil`` is used below but is
        never imported in this file.
        """
        import pyspark
        snake_case_ = ParquetWriter if file_format == "parquet" else ArrowWriter
        snake_case_ = os.path.join(self._working_dir , os.path.basename(a__ ) ) if self._working_dir else fpath
        snake_case_ = file_format == "parquet"
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        snake_case_ = self.config.features
        snake_case_ = self._writer_batch_size
        snake_case_ = self._fs.storage_options
        def write_arrow(a__ ):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            snake_case_ = pyspark.TaskContext().taskAttemptId()
            snake_case_ = next(a__ , a__ )
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , )
            snake_case_ = 0
            snake_case_ = writer_class(
                features=a__ , path=working_fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , writer_batch_size=a__ , storage_options=a__ , embed_local_files=a__ , )
            snake_case_ = pa.Table.from_batches([first_batch] )
            writer.write_table(a__ )
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    # Rotate to a new shard file once the current one reaches the size budget.
                    snake_case_ , snake_case_ = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
                    shard_id += 1
                    snake_case_ = writer_class(
                        features=writer._features , path=working_fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , writer_batch_size=a__ , storage_options=a__ , embed_local_files=a__ , )
                snake_case_ = pa.Table.from_batches([batch] )
                writer.write_table(a__ )
            if writer._num_bytes > 0:
                snake_case_ , snake_case_ = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
            if working_fpath != fpath:
                # Move shards written to the local working dir to their final destination.
                for file in os.listdir(os.path.dirname(a__ ) ):
                    snake_case_ = os.path.join(os.path.dirname(a__ ) , os.path.basename(a__ ) )
                    shutil.move(a__ , a__ )
        snake_case_ = (
            self.df.mapInArrow(a__ , "task_id: long, num_examples: long, num_bytes: long" )
            .groupBy("task_id" )
            .agg(
                pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)

    def lowerCAmelCase__ ( self , a__ , a__ = "arrow" , a__ = None , a__ = None , **a__ , ) -> int:
        """Drive split preparation: write all shards, then rename them in place.

        Validates the cache dir, repartitions the DataFrame if needed, writes
        task-local shards via the method above and finally renames everything
        into the ``-TTTTT-SSSSS-of-NNNNN`` pattern (or a plain name when there
        is a single shard). NOTE(review): the duplicate ``a__`` parameters
        appear to be (split_generator, file_format, file_size, max_shard_size).
        """
        self._validate_cache_dir()
        snake_case_ = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
        self._repartition_df_if_needed(a__ )
        snake_case_ = not is_remote_filesystem(self._fs )
        snake_case_ = os.path.join if is_local else posixpath.join
        snake_case_ = "-TTTTT-SSSSS-of-NNNNN"
        snake_case_ = F'{self.name}-{split_generator.name}{SUFFIX}.{file_format}'
        snake_case_ = path_join(self._output_dir , a__ )
        snake_case_ = 0
        snake_case_ = 0
        snake_case_ = 0
        snake_case_ = []
        snake_case_ = []
        for task_id, content in self._prepare_split_single(a__ , a__ , a__ ):
            (
                (
                    snake_case_
                ) , (
                    snake_case_
                ) , (
                    snake_case_
                ) , (
                    snake_case_
                ) ,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards) )
                all_shard_lengths.extend(a__ )
        snake_case_ = total_num_examples
        snake_case_ = total_num_bytes
        # should rename everything at the end
        logger.debug(F'Renaming {total_shards} shards.' )
        if total_shards > 1:
            snake_case_ = all_shard_lengths
            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            snake_case_ = self._fs
            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                a__ , a__ , a__ , ):
                rename(
                    a__ , fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , fpath.replace("TTTTT-SSSSS" , F'{global_shard_id:05d}' ).replace("NNNNN" , F'{total_shards:05d}' ) , )
            snake_case_ = []
            snake_case_ = 0
            for i in range(len(a__ ) ):
                snake_case_ , snake_case_ = task_id_and_num_shards[i]
                for shard_id in range(a__ ):
                    args.append([task_id, shard_id, global_shard_id] )
                    global_shard_id += 1
            # Fan the renames out across the cluster so remote filesystems rename in parallel.
            self._spark.sparkContext.parallelize(a__ , len(a__ ) ).map(lambda a__ : _rename_shard(*a__ ) ).collect()
        else:
            # don't use any pattern
            snake_case_ = 0
            snake_case_ = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , fpath.replace(a__ , "" ) , )

    def lowerCAmelCase__ ( self , a__ , ) -> SparkExamplesIterable:
        """Stream examples directly from the DataFrame (no materialisation)."""
        return SparkExamplesIterable(self.df )
| 85 | 1 |
'''simple docstring'''
def __lowerCamelCase( arr , required_sum ):
    """Return True if some subset of ``arr`` sums exactly to ``required_sum``.

    Classic subset-sum dynamic programme: ``subset[i][j]`` is True when a
    subset of the first ``i`` elements sums to ``j``. Runs in
    O(len(arr) * required_sum) time and space.

    :param arr: sequence of non-negative integers.
    :param required_sum: target sum (non-negative integer).
    :return: bool.

    NOTE(review): the previous version declared two parameters with the same
    placeholder name (a SyntaxError) and assigned every DP cell to an unused
    local; both restored here.
    """
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # A sum of zero is always reachable by taking no elements.
    for i in range(arr_len + 1):
        subset[i][0] = True
    # A non-zero sum is unreachable from the empty prefix.
    for j in range(1, required_sum + 1):
        subset[0][j] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                # Element too large for this target: inherit the answer without it.
                subset[i][j] = subset[i - 1][j]
            else:
                # Either skip the element or take it and solve for the remainder.
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
    # When executed as a script, run every doctest embedded in this module.
    from doctest import testmod

    testmod()
| 371 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class _lowerCAmelCase ( unittest.TestCase ):
    """AutoImageProcessor resolution tests: hub loading, local config files,
    error reporting, ``trust_remote_code`` handling and custom registration.

    NOTE(review): obfuscation damage — every test method shares the name
    ``lowerCAmelCase`` (later definitions shadow earlier ones, so only the
    final one would run under unittest), and ``__snake_case`` is referenced
    throughout without being bound (it stands in for the real arguments:
    temp dirs, expected classes, booleans). Comments describe evident intent.
    """

    def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
        # Placeholder initialisation; the bound value is never used.
        snake_case = 0

    def lowerCAmelCase ( self : str )-> Any:
        # Resolve an image processor straight from a hub checkpoint.
        snake_case = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
        self.assertIsInstance(__snake_case , __snake_case )

    def lowerCAmelCase ( self : List[Any] )-> str:
        # Resolve from a local dir holding preprocessor_config.json + config.json.
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case = Path(__snake_case ) / """preprocessor_config.json"""
            snake_case = Path(__snake_case ) / """config.json"""
            json.dump(
                {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
            json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
            snake_case = AutoImageProcessor.from_pretrained(__snake_case )
            self.assertIsInstance(__snake_case , __snake_case )

    def lowerCAmelCase ( self : List[str] )-> Optional[Any]:
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case = Path(__snake_case ) / """preprocessor_config.json"""
            snake_case = Path(__snake_case ) / """config.json"""
            json.dump(
                {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
            json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
            snake_case = AutoImageProcessor.from_pretrained(__snake_case )
            self.assertIsInstance(__snake_case , __snake_case )

    def lowerCAmelCase ( self : Tuple )-> Optional[int]:
        # config.json alone must suffice once image_processor_type is dropped.
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case = CLIPConfig()
            # Create a dummy config file with image_proceesor_type
            snake_case = Path(__snake_case ) / """preprocessor_config.json"""
            snake_case = Path(__snake_case ) / """config.json"""
            json.dump(
                {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
            json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            snake_case = AutoImageProcessor.from_pretrained(__snake_case ).to_dict()
            config_dict.pop("""image_processor_type""" )
            snake_case = CLIPImageProcessor(**__snake_case )
            # save in new folder
            model_config.save_pretrained(__snake_case )
            config.save_pretrained(__snake_case )
            snake_case = AutoImageProcessor.from_pretrained(__snake_case )
            # make sure private variable is not incorrectly saved
            snake_case = json.loads(config.to_json_string() )
            self.assertTrue("""_processor_class""" not in dict_as_saved )
            self.assertIsInstance(__snake_case , __snake_case )

    def lowerCAmelCase ( self : List[Any] )-> Optional[Any]:
        # A lone preprocessor_config.json is also sufficient to resolve.
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case = Path(__snake_case ) / """preprocessor_config.json"""
            json.dump(
                {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
            snake_case = AutoImageProcessor.from_pretrained(__snake_case )
            self.assertIsInstance(__snake_case , __snake_case )

    def lowerCAmelCase ( self : int )-> Dict:
        # Unknown model identifier -> helpful error message.
        with self.assertRaisesRegex(
            __snake_case , """clip-base is not a local folder and is not a valid model identifier""" ):
            snake_case = AutoImageProcessor.from_pretrained("""clip-base""" )

    def lowerCAmelCase ( self : Tuple )-> int:
        # Invalid revision -> helpful error message.
        with self.assertRaisesRegex(
            __snake_case , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
            snake_case = AutoImageProcessor.from_pretrained(__snake_case , revision="""aaaaaa""" )

    def lowerCAmelCase ( self : str )-> Union[str, Any]:
        # Repo without preprocessor_config.json -> helpful error message.
        with self.assertRaisesRegex(
            __snake_case , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
            snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )

    def lowerCAmelCase ( self : List[str] )-> List[str]:
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(__snake_case ):
            snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(__snake_case ):
            snake_case = AutoImageProcessor.from_pretrained(
                """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
        snake_case = AutoImageProcessor.from_pretrained(
            """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
        self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(__snake_case )
            snake_case = AutoImageProcessor.from_pretrained(__snake_case , trust_remote_code=__snake_case )
        self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )

    def lowerCAmelCase ( self : List[str] )-> Dict:
        # Register a custom config + processor, then round-trip it via the auto API.
        try:
            AutoConfig.register("""custom""" , __snake_case )
            AutoImageProcessor.register(__snake_case , __snake_case )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(__snake_case ):
                AutoImageProcessor.register(__snake_case , __snake_case )
            with tempfile.TemporaryDirectory() as tmpdirname:
                snake_case = Path(__snake_case ) / """preprocessor_config.json"""
                snake_case = Path(__snake_case ) / """config.json"""
                json.dump(
                    {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
                json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
                snake_case = CustomImageProcessor.from_pretrained(__snake_case )
            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(__snake_case )
                snake_case = AutoImageProcessor.from_pretrained(__snake_case )
                self.assertIsInstance(__snake_case , __snake_case )
        finally:
            # Always undo the registrations so other tests see a clean mapping.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def lowerCAmelCase ( self : Dict )-> Optional[int]:
        # trust_remote_code resolution when both a local and a hub class exist.
        class _lowerCAmelCase ( A__ ):
            """Local stand-in image-processor class."""
            # NOTE(review): attribute was presumably ``is_local`` — the
            # assertions below read ``image_processor.is_local``.
            snake_case_ = True
        try:
            AutoConfig.register("""custom""" , __snake_case )
            AutoImageProcessor.register(__snake_case , __snake_case )
            # If remote code is not set, the default is to use local
            snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
            self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
            self.assertTrue(image_processor.is_local )
            # If remote code is disabled, we load the local one.
            snake_case = AutoImageProcessor.from_pretrained(
                """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
            self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
            self.assertTrue(image_processor.is_local )
            # If remote is enabled, we load from the Hub
            snake_case = AutoImageProcessor.from_pretrained(
                """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
            self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
            self.assertTrue(not hasattr(__snake_case , """is_local""" ) )
        finally:
            # Always undo the registrations so other tests see a clean mapping.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 3 | 0 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class lowercase__:
    """Shared dummy-component helpers and save/load tests for DeepFloyd IF pipelines.

    Designed as a mixin over a pipeline test case: the tests rely on
    ``self.pipeline_class`` and ``self.get_dummy_inputs`` being provided by the
    concrete test class. NOTE(review): obfuscation damage — all four methods
    share the name ``_lowercase`` (later defs shadow earlier ones),
    ``SCREAMING_SNAKE_CASE_`` is referenced as an argument without being
    bound, and placeholder locals (``lowercase_``) never bind the names later
    read (``unet``, ``pipe``, ``inputs``, ...). Docstrings describe evident intent.
    """

    def _lowercase ( self : int ) -> Dict:
        """Build tiny stage-1 IF components (text encoder, tokenizer, UNet,
        scheduler, watermarker), each constructed under a fixed seed."""
        torch.manual_seed(0 )
        lowercase_ = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
        torch.manual_seed(0 )
        lowercase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
        torch.manual_seed(0 )
        lowercase_ = UNetaDConditionModel(
            sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[
                '''ResnetDownsampleBlock2D''',
                '''SimpleCrossAttnDownBlock2D''',
            ] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , )
        unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        torch.manual_seed(0 )
        lowercase_ = DDPMScheduler(
            num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=SCREAMING_SNAKE_CASE_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
        torch.manual_seed(0 )
        lowercase_ = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _lowercase ( self : str ) -> int:
        """Build tiny stage-2 (super-resolution) IF components: the UNet takes
        6 input channels and an extra image-noising scheduler is returned."""
        torch.manual_seed(0 )
        lowercase_ = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
        torch.manual_seed(0 )
        lowercase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
        torch.manual_seed(0 )
        lowercase_ = UNetaDConditionModel(
            sample_size=3_2 , layers_per_block=[1, 2] , block_out_channels=[3_2, 6_4] , down_block_types=[
                '''ResnetDownsampleBlock2D''',
                '''SimpleCrossAttnDownBlock2D''',
            ] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='''gelu''' , time_embedding_dim=3_2 , )
        unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        torch.manual_seed(0 )
        lowercase_ = DDPMScheduler(
            num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=SCREAMING_SNAKE_CASE_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
        torch.manual_seed(0 )
        lowercase_ = DDPMScheduler(
            num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , )
        torch.manual_seed(0 )
        lowercase_ = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _lowercase ( self : str ) -> Tuple:
        """Save/load round-trip with all optional components set to None: the
        reloaded pipeline must keep them None and produce (near-)identical output."""
        lowercase_ = self.get_dummy_components()
        lowercase_ = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
        pipe.to(SCREAMING_SNAKE_CASE_ )
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        lowercase_ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
        lowercase_ = inputs['''prompt''']
        lowercase_ = inputs['''generator''']
        lowercase_ = inputs['''num_inference_steps''']
        lowercase_ = inputs['''output_type''']
        if "image" in inputs:
            lowercase_ = inputs['''image''']
        else:
            lowercase_ = None
        if "mask_image" in inputs:
            lowercase_ = inputs['''mask_image''']
        else:
            lowercase_ = None
        if "original_image" in inputs:
            lowercase_ = inputs['''original_image''']
        else:
            lowercase_ = None
        lowercase_ , lowercase_ = pipe.encode_prompt(SCREAMING_SNAKE_CASE_ )
        # inputs with prompt converted to embeddings
        lowercase_ = {
            '''prompt_embeds''': prompt_embeds,
            '''negative_prompt_embeds''': negative_prompt_embeds,
            '''generator''': generator,
            '''num_inference_steps''': num_inference_steps,
            '''output_type''': output_type,
        }
        if image is not None:
            lowercase_ = image
        if mask_image is not None:
            lowercase_ = mask_image
        if original_image is not None:
            lowercase_ = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        lowercase_ = pipe(**SCREAMING_SNAKE_CASE_ )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(SCREAMING_SNAKE_CASE_ )
            lowercase_ = self.pipeline_class.from_pretrained(SCREAMING_SNAKE_CASE_ )
            pipe_loaded.to(SCREAMING_SNAKE_CASE_ )
            pipe_loaded.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
            pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
        lowercase_ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
        lowercase_ = inputs['''generator''']
        lowercase_ = inputs['''num_inference_steps''']
        lowercase_ = inputs['''output_type''']
        # inputs with prompt converted to embeddings
        lowercase_ = {
            '''prompt_embeds''': prompt_embeds,
            '''negative_prompt_embeds''': negative_prompt_embeds,
            '''generator''': generator,
            '''num_inference_steps''': num_inference_steps,
            '''output_type''': output_type,
        }
        if image is not None:
            lowercase_ = image
        if mask_image is not None:
            lowercase_ = mask_image
        if original_image is not None:
            lowercase_ = original_image
        lowercase_ = pipe_loaded(**SCREAMING_SNAKE_CASE_ )[0]
        lowercase_ = np.abs(to_np(SCREAMING_SNAKE_CASE_ ) - to_np(SCREAMING_SNAKE_CASE_ ) ).max()
        self.assertLess(SCREAMING_SNAKE_CASE_ , 1e-4 )

    def _lowercase ( self : Tuple ) -> Any:
        """Plain save/load round-trip: the reloaded pipeline must produce
        output within 1e-4 (max abs diff) of the original."""
        lowercase_ = self.get_dummy_components()
        lowercase_ = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
        pipe.to(SCREAMING_SNAKE_CASE_ )
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        lowercase_ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
        lowercase_ = pipe(**SCREAMING_SNAKE_CASE_ )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(SCREAMING_SNAKE_CASE_ )
            lowercase_ = self.pipeline_class.from_pretrained(SCREAMING_SNAKE_CASE_ )
            pipe_loaded.to(SCREAMING_SNAKE_CASE_ )
            pipe_loaded.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
            pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        lowercase_ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
        lowercase_ = pipe_loaded(**SCREAMING_SNAKE_CASE_ )[0]
        lowercase_ = np.abs(to_np(SCREAMING_SNAKE_CASE_ ) - to_np(SCREAMING_SNAKE_CASE_ ) ).max()
        self.assertLess(SCREAMING_SNAKE_CASE_ , 1e-4 )
| 30 |
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
# NOTE(review): the module below reads ``is_python_no_less_than_3_10`` but the
# flag was only bound to the placeholder ``__A`` (also mis-annotated as
# ``str`` although the value is a bool); bind both names so any existing
# reference to ``__A`` keeps working.
is_python_no_less_than_3_10: bool = sys.version_info >= (3, 10)
__A : bool = is_python_no_less_than_3_10
def lowercase ( default=None , metadata=None ):
    """Dataclass-field helper for (possibly mutable) list defaults.

    Wraps ``dataclasses.field`` so a mutable value can be used as a default:
    the value is captured in a ``default_factory`` closure. ``metadata`` is
    forwarded to the field unchanged.

    NOTE(review): the previous signature declared two parameters with the
    same placeholder name (a SyntaxError) and the metadata argument was never
    forwarded under its own name; both restored here.
    """
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class _a :
    """Basic example fixture for HfArgumentParser tests.

    NOTE(review): all four fields were collapsed to one placeholder name with
    the literal ``42`` standing in for their (lost) type annotations, so only
    a single class attribute actually survives.
    """
    UpperCamelCase__ = 42
    UpperCamelCase__ = 42
    UpperCamelCase__ = 42
    UpperCamelCase__ = 42
@dataclass
class _a :
    """Fixture with a required field plus a defaulted string field carrying help metadata.

    NOTE(review): both fields share one placeholder name; ``42`` replaced the
    first field's annotation.
    """
    UpperCamelCase__ = 42
    UpperCamelCase__ = field(default="""toto""" , metadata={"""help""": """help message"""})
@dataclass
class _a :
    """Boolean fixture: a False default, a True default and an optional tri-state (None).

    NOTE(review): the three fields share one placeholder name.
    """
    UpperCamelCase__ = False
    UpperCamelCase__ = True
    UpperCamelCase__ = None
class _a ( lowerCAmelCase):
    """String-valued enum fixture with members ``titi`` / ``toto``.

    NOTE(review): the base ``lowerCAmelCase`` is undefined here — presumably
    an Enum base; the two members also share one placeholder name.
    """
    UpperCamelCase__ = """titi"""
    UpperCamelCase__ = """toto"""
class _a ( lowerCAmelCase):
    """Mixed-type enum fixture: two string members plus an int member (42).

    NOTE(review): the base ``lowerCAmelCase`` is undefined here — presumably
    an Enum base; the members share one placeholder name.
    """
    UpperCamelCase__ = """titi"""
    UpperCamelCase__ = """toto"""
    UpperCamelCase__ = 42
@dataclass
class _a :
    """Fixture whose post-init hook coerces ``foo`` into a BasicEnum.

    NOTE(review): ``BasicEnum`` is not defined under that name in this file,
    the coerced value is dropped into a placeholder local instead of
    ``self.foo``, and the method name suggests it was ``__post_init__``.
    """
    UpperCamelCase__ = "toto"
    def lowercase__ ( self : Tuple )->Optional[int]:
        _UpperCAmelCase = BasicEnum(self.foo )
@dataclass
class _a :
    """Fixture whose post-init hook coerces ``foo`` into a MixedTypeEnum.

    NOTE(review): ``MixedTypeEnum`` is not defined under that name in this
    file, the coerced value is dropped into a placeholder local, and the
    method name suggests it was ``__post_init__``.
    """
    UpperCamelCase__ = "toto"
    def lowercase__ ( self : List[str] )->List[Any]:
        _UpperCAmelCase = MixedTypeEnum(self.foo )
@dataclass
class _a :
    """Optional-typed fixture: None-defaulted scalars plus two default-empty lists.

    NOTE(review): ``list_field`` refers to the helper defined above under the
    obfuscated name ``lowercase``, and ``lowerCAmelCase`` used as a default is
    undefined; the fields also share one placeholder name.
    """
    UpperCamelCase__ = None
    UpperCamelCase__ = field(default=lowerCAmelCase , metadata={"""help""": """help message"""})
    UpperCamelCase__ = None
    UpperCamelCase__ = list_field(default=[])
    UpperCamelCase__ = list_field(default=[])
@dataclass
class _a :
    """List fixture: empty, int, string and float list defaults.

    NOTE(review): ``list_field`` refers to the helper defined above under the
    obfuscated name ``lowercase``; the fields share one placeholder name.
    """
    UpperCamelCase__ = list_field(default=[])
    UpperCamelCase__ = list_field(default=[1, 2, 3])
    UpperCamelCase__ = list_field(default=["""Hallo""", """Bonjour""", """Hello"""])
    UpperCamelCase__ = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class _a :
    """Required-fields fixture; post-init coerces ``required_enum`` into a BasicEnum.

    NOTE(review): ``BasicEnum`` is not defined under that name in this file,
    ``self.required_enum`` has no surviving field of that name, and the three
    ``field()`` declarations share one placeholder name.
    """
    UpperCamelCase__ = field()
    UpperCamelCase__ = field()
    UpperCamelCase__ = field()
    def lowercase__ ( self : int )->str:
        _UpperCAmelCase = BasicEnum(self.required_enum )
@dataclass
class _a :
    """String-literal-annotation fixture: mixes required, optional and defaulted fields.

    NOTE(review): ``list_field`` refers to the helper defined above under the
    obfuscated name ``lowercase``; the fields share one placeholder name and
    ``42`` replaced the first field's annotation.
    """
    UpperCamelCase__ = 42
    UpperCamelCase__ = field()
    UpperCamelCase__ = None
    UpperCamelCase__ = field(default="""toto""" , metadata={"""help""": """help message"""})
    UpperCamelCase__ = list_field(default=["""Hallo""", """Bonjour""", """Hello"""])
# PEP 604 (``X | None``) variants of the bool/optional fixtures, only defined
# when running on Python >= 3.10. NOTE(review): ``is_python_no_less_than_3_10``
# must be bound at module level (the version flag near the imports);
# ``list_field`` refers to the helper defined above under the obfuscated name
# ``lowercase`` and ``lowerCAmelCase`` used as a default is undefined.
if is_python_no_less_than_3_10:
    @dataclass
    class _a :
        """PEP 604 variant of the boolean fixture (see WithDefaultBoolExample)."""
        UpperCamelCase__ = False
        UpperCamelCase__ = True
        UpperCamelCase__ = None

    @dataclass
    class _a :
        """PEP 604 variant of the optional fixture (None-defaulted scalars + empty lists)."""
        UpperCamelCase__ = None
        UpperCamelCase__ = field(default=lowerCAmelCase , metadata={"""help""": """help message"""})
        UpperCamelCase__ = None
        UpperCamelCase__ = list_field(default=[])
        UpperCamelCase__ = list_field(default=[])
class _a ( unittest.TestCase):
"""simple docstring"""
    def lowercase__ ( self : int , __UpperCamelCase : argparse.ArgumentParser , __UpperCamelCase : argparse.ArgumentParser )->Dict:
        """Assert that two argparse parsers define equivalent actions.

        Compares action attributes pairwise (ignoring ``container``); choice
        actions with a custom ``type`` callable are compared by applying the
        callable to every choice. NOTE(review): both parser parameters share
        one placeholder name (a SyntaxError) and the body reads them as
        ``a`` / ``b`` (and the dicts as ``xx`` / ``yy``), which are unbound here.
        """
        self.assertEqual(len(a._actions ) , len(b._actions ) )
        for x, y in zip(a._actions , b._actions ):
            _UpperCAmelCase = {k: v for k, v in vars(__UpperCamelCase ).items() if k != '''container'''}
            _UpperCAmelCase = {k: v for k, v in vars(__UpperCamelCase ).items() if k != '''container'''}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get('''choices''' , __UpperCamelCase ) and yy.get('''choices''' , __UpperCamelCase ):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx['''type'''](__UpperCamelCase ) , yy['''type'''](__UpperCamelCase ) )
                del xx["type"], yy["type"]
            self.assertEqual(__UpperCamelCase , __UpperCamelCase )
    def lowercase__ ( self : int )->str:
        """Round-trip the basic fixture through HfArgumentParser and compare
        against a hand-built argparse parser, then parse concrete args.

        NOTE(review): ``__UpperCamelCase`` is referenced as an argument
        without being bound (it stands in for the real values), and the
        placeholder assignments should bind ``parser`` / ``expected`` /
        ``args`` / ``example``.
        """
        _UpperCAmelCase = HfArgumentParser(__UpperCamelCase )
        _UpperCAmelCase = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=__UpperCamelCase , required=__UpperCamelCase )
        expected.add_argument('''--bar''' , type=__UpperCamelCase , required=__UpperCamelCase )
        expected.add_argument('''--baz''' , type=__UpperCamelCase , required=__UpperCamelCase )
        expected.add_argument('''--flag''' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='''?''' )
        self.argparsersEqual(__UpperCamelCase , __UpperCamelCase )
        _UpperCAmelCase = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
        ((_UpperCAmelCase) , ) = parser.parse_args_into_dataclasses(__UpperCamelCase , look_for_args_file=__UpperCamelCase )
        self.assertFalse(example.flag )
    def lowercase__ ( self : Dict )->List[Any]:
        """Defaults-only fixture: the generated parser should expose ``--foo``
        (default 42) and ``--baz`` (default "toto" with help text).

        NOTE(review): ``__UpperCamelCase`` is referenced without being bound
        and the placeholder assignments should bind ``parser`` / ``expected``.
        """
        _UpperCAmelCase = HfArgumentParser(__UpperCamelCase )
        _UpperCAmelCase = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , default=4_2 , type=__UpperCamelCase )
        expected.add_argument('''--baz''' , default='''toto''' , type=__UpperCamelCase , help='''help message''' )
        self.argparsersEqual(__UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : Tuple )->List[str]:
_UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='''?''' )
expected.add_argument('''--baz''' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='''?''' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=__UpperCamelCase , dest='''baz''' )
expected.add_argument('''--opt''' , type=__UpperCamelCase , default=__UpperCamelCase )
_UpperCAmelCase = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__UpperCamelCase )
for dataclass_type in dataclass_types:
_UpperCAmelCase = HfArgumentParser(__UpperCamelCase )
self.argparsersEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = parser.parse_args([] )
self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) )
_UpperCAmelCase = parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) )
_UpperCAmelCase = parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) )
_UpperCAmelCase = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) )
_UpperCAmelCase = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) )
def lowercase__ ( self : Optional[Any] )->str:
_UpperCAmelCase = HfArgumentParser(__UpperCamelCase )
_UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 4_2] , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , )
self.argparsersEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
_UpperCAmelCase = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
_UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
_UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
_UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 4_2 )
_UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def lowercase__ ( self : List[str] )->List[str]:
@dataclass
class _a :
"""simple docstring"""
UpperCamelCase__ = "toto"
_UpperCAmelCase = HfArgumentParser(__UpperCamelCase )
_UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 4_2) , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , )
self.argparsersEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
_UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
_UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 4_2 )
def lowercase__ ( self : int )->int:
_UpperCAmelCase = HfArgumentParser(__UpperCamelCase )
_UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=__UpperCamelCase )
expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=__UpperCamelCase )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__UpperCamelCase )
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=__UpperCamelCase )
self.argparsersEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = parser.parse_args([] )
self.assertEqual(
__UpperCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
_UpperCAmelCase = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
self.assertEqual(__UpperCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
def lowercase__ ( self : Union[str, Any] )->Tuple:
_UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=__UpperCamelCase , type=__UpperCamelCase )
expected.add_argument('''--bar''' , default=__UpperCamelCase , type=__UpperCamelCase , help='''help message''' )
expected.add_argument('''--baz''' , default=__UpperCamelCase , type=__UpperCamelCase )
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=__UpperCamelCase )
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=__UpperCamelCase )
_UpperCAmelCase = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__UpperCamelCase )
for dataclass_type in dataclass_types:
_UpperCAmelCase = HfArgumentParser(__UpperCamelCase )
self.argparsersEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = parser.parse_args([] )
self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , bar=__UpperCamelCase , baz=__UpperCamelCase , ces=[] , des=[] ) )
_UpperCAmelCase = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
self.assertEqual(__UpperCamelCase , Namespace(foo=1_2 , bar=3.1_4 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
def lowercase__ ( self : Any )->int:
_UpperCAmelCase = HfArgumentParser(__UpperCamelCase )
_UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=__UpperCamelCase , required=__UpperCamelCase )
expected.add_argument('''--required_str''' , type=__UpperCamelCase , required=__UpperCamelCase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__UpperCamelCase , )
self.argparsersEqual(__UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : str )->List[Any]:
_UpperCAmelCase = HfArgumentParser(__UpperCamelCase )
_UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=__UpperCamelCase , required=__UpperCamelCase )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__UpperCamelCase , )
expected.add_argument('''--opt''' , type=__UpperCamelCase , default=__UpperCamelCase )
expected.add_argument('''--baz''' , default='''toto''' , type=__UpperCamelCase , help='''help message''' )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__UpperCamelCase )
self.argparsersEqual(__UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : Optional[Any] )->Optional[int]:
_UpperCAmelCase = HfArgumentParser(__UpperCamelCase )
_UpperCAmelCase = {
'''foo''': 1_2,
'''bar''': 3.1_4,
'''baz''': '''42''',
'''flag''': True,
}
_UpperCAmelCase = parser.parse_dict(__UpperCamelCase )[0]
_UpperCAmelCase = BasicExample(**__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : Union[str, Any] )->List[str]:
_UpperCAmelCase = HfArgumentParser(__UpperCamelCase )
_UpperCAmelCase = {
'''foo''': 1_2,
'''bar''': 3.1_4,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 4_2,
}
self.assertRaises(__UpperCamelCase , parser.parse_dict , __UpperCamelCase , allow_extra_keys=__UpperCamelCase )
def lowercase__ ( self : Optional[Any] )->Optional[int]:
_UpperCAmelCase = HfArgumentParser(__UpperCamelCase )
_UpperCAmelCase = {
'''foo''': 1_2,
'''bar''': 3.1_4,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = os.path.join(__UpperCamelCase , '''temp_json''' )
os.mkdir(__UpperCamelCase )
with open(temp_local_path + '''.json''' , '''w+''' ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0]
_UpperCAmelCase = BasicExample(**__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : Union[str, Any] )->Any:
_UpperCAmelCase = HfArgumentParser(__UpperCamelCase )
_UpperCAmelCase = {
'''foo''': 1_2,
'''bar''': 3.1_4,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = os.path.join(__UpperCamelCase , '''temp_yaml''' )
os.mkdir(__UpperCamelCase )
with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
yaml.dump(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
_UpperCAmelCase = BasicExample(**__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : int )->List[str]:
_UpperCAmelCase = HfArgumentParser(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
| 260 | 0 |
'''simple docstring'''
from __future__ import annotations
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
lowercase__ , lowercase__ : int = set(UpperCAmelCase ), [start]
while stack:
lowercase__ : Optional[int] = stack.pop()
explored.add(UpperCAmelCase )
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v] ):
if adj not in explored:
stack.append(UpperCAmelCase )
return explored
# Adjacency list for the doctest/demo run below.
# NOTE(review): "D" lists itself as a neighbour — likely a typo for ["B", "E"]; confirm.
__a: Optional[Any] = {
    """A""": ["""B""", """C""", """D"""],
    """B""": ["""A""", """D""", """E"""],
    """C""": ["""A""", """F"""],
    """D""": ["""B""", """D"""],
    """E""": ["""B""", """F"""],
    """F""": ["""C""", """E""", """G"""],
    """G""": ["""F"""],
}
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): `depth_first_search` and `G` are undefined under the mangled
    # names above (the function is `__UpperCamelCase`, the dict is `__a`) — this
    # demo raises NameError as written.
    print(depth_first_search(G, """A"""))
| 214 | '''simple docstring'''
def __UpperCamelCase ( UpperCAmelCase ):
if not all(x.isalpha() for x in string ):
raise ValueError('''String must only contain alphabetic characters.''' )
lowercase__ : Tuple = sorted(string.lower() )
return len(UpperCAmelCase ) == len(set(UpperCAmelCase ) )
# Interactive demo: read a word and report whether it is an isogram.
if __name__ == "__main__":
    # NOTE(review): both results are bound to the same mangled name `__a`,
    # while the lines below read `input_str` / `is_isogram` / `isogram`,
    # none of which exist — this block raises NameError as written.
    __a: Union[str, Any] = input("""Enter a string """).strip()
    __a: Tuple = is_isogram(input_str)
    print(F'{input_str} is {"an" if isogram else "not an"} isogram.')
| 214 | 1 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def __UpperCamelCase(*objects):
    """Drop references to `objects`, collect garbage and empty the accelerator cache.

    Returns a list of `None` placeholders so callers can rebind their
    variables in one shot:  `a, b = release_memory(a, b)`.

    Fixes the mangled original: `isinstance(_A, _A)` (type check against a
    non-type) and destroyed assignment targets.
    """
    if not isinstance(objects, list):
        objects = list(objects)  # *objects arrives as a tuple; make it mutable
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    # NOTE(review): is_xpu_available / is_npu_available come from the
    # module-level `.imports` import not visible in this chunk.
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def __UpperCamelCase ( _A ):
lowerCAmelCase_ = [
'''CUDA out of memory.''', # CUDA OOM
'''cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.''', # CUDNN SNAFU
'''DefaultCPUAllocator: can\'t allocate memory''', # CPU OOM
]
if isinstance(_A , _A ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def __UpperCamelCase(function=None, starting_batch_size=128):
    """Decorator that retries `function`, halving its first (batch_size)
    argument on out-of-memory failures until it succeeds or hits zero.

    Usable bare or with arguments:
        @decorator                         -> starting_batch_size=128
        @decorator(starting_batch_size=64)

    Fixes the mangled original, whose `nonlocal batch_size` referred to a
    name that was never bound (assigned to a placeholder instead) and whose
    no-function branch partial-ed a placeholder.
    """
    if function is None:
        # Called with keyword arguments only: return a partial awaiting the function.
        return functools.partial(__UpperCamelCase, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        # NOTE(review): is_xpu_available / is_npu_available / should_reduce_batch_size
        # come from module-level imports not visible in this chunk.
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error: batch_size is injected by the decorator itself.
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
| 278 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    """Shared assertions for a Dataset read from the 4-row JSON fixture.

    Restored name is grounded by the call sites below (`_check_json_dataset`);
    the mangled def had duplicate `_A` parameters (a SyntaxError).
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    """keep_in_memory toggles Arrow memory growth; dataset content is unchanged."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    """Explicit `features` override the inferred schema (fixed mangled `Value(_A)`)."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    """Column order of the source file (col_3, col_1, col_2) is preserved."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    """Requesting a different column order than the file yields that order."""
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = Features({feature: Value(dtype) for feature, dtype in features.items()})
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    """The requested split is attached to the dataset; default is 'train'."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    # Fixed precedence bug: the original `a == b if split else "train"` asserted
    # the truthy string "train" (always passing) when split was None.
    assert dataset.split == (split if split else "train")


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    """The reader accepts a single path or a list of paths."""
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for a DatasetDict loaded from the JSON fixtures.

    Restored name is grounded by the call sites below; the mangled def had
    duplicate `_A` parameters (a SyntaxError). The default is a tuple, so it
    is safe as a default argument.
    """
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    """A {split: path} mapping produces a DatasetDict; memory behavior as above."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    """Explicit `features` override the inferred schema for every split."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    """Each key of the path mapping becomes a split of the DatasetDict."""
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def load_json(buffer):
    """Parse an open file object as a single JSON document."""
    return json.load(buffer)


def load_json_lines(buffer):
    """Parse an open file object as JSON Lines: one document per line.

    Fixes the mangled original, which called `json.loads` on the whole
    buffer for every line instead of on each line. Restored names are
    grounded by the `parametrize` lists below that reference
    `load_json` / `load_json_lines`.
    """
    return [json.loads(line) for line in buffer]
class A:
    """Round-trip tests for JsonDatasetWriter.

    Restored distinct `test_*` method names: the mangled source gave every
    method the same name (shadowing all but the last) and duplicate
    parameter names (a SyntaxError). Fixture names (`dataset`,
    `shared_datadir`, `tmp_path_factory`) follow the module's conventions —
    confirm against conftest.
    """

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        # num_proc must be a positive integer.
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
| 278 | 1 |
"""simple docstring"""
from sklearn.metrics import mean_squared_error
import datasets
lowercase : str = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
lowercase : Any = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
lowercase : Dict = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SCREAMING_SNAKE_CASE__(datasets.Metric):
    """Mean-squared-error metric backed by sklearn.metrics.mean_squared_error.

    Restored distinct method names: the mangled source named all three
    methods identically (shadowing), while `_info` itself calls
    `self._get_feature_types()` — grounding the restored names. `_compute`
    also referenced undefined `_A` placeholders.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        # "multilist" config = multi-dimensional inputs (sequences of floats).
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        """Return {"mse": ...}; squared=False yields RMSE instead."""
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
# NOTE(review): all seven constants below are bound to the SAME mangled name
# `lowercase`, so only the last binding (250) survives at runtime; the
# comments record the original intent (OUTPUT_SIZE, SCALE_RANGE,
# FILTER_TINY_SCALE, LABEL_DIR, IMG_DIR, OUTPUT_DIR, NUMBER_IMAGES) — confirm.
lowercase : Dict = (720, 1280) # Height, Width
lowercase : Any = (0.4, 0.6) # if height or width lower than this scale, drop it.
lowercase : Tuple = 1 / 100
lowercase : Optional[int] = ""
lowercase : Any = ""
lowercase : Union[str, Any] = ""
lowercase : str = 250
def UpperCAmelCase_ ():
    """Generate NUMBER_IMAGES mosaic-augmented images plus YOLO-format label files.

    NOTE(review): the identifier mangling destroyed this function — every
    local is rebound to `__UpperCamelCase` and every argument/value reads the
    undefined `_lowerCAmelCase`, so it raises NameError as written. The
    comments below record the apparent intent; confirm against the upstream
    mosaic-augmentation script before relying on them.
    """
    # Load (image paths, annotations) from the label/image directories.
    __UpperCamelCase , __UpperCamelCase : Dict = get_dataset(_lowerCAmelCase , _lowerCAmelCase )
    for index in range(_lowerCAmelCase ):
        # Pick 4 random samples to tile into one mosaic.
        __UpperCamelCase : Optional[int] = random.sample(range(len(_lowerCAmelCase ) ) , 4 )
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Union[str, Any] = update_image_and_anno(
            _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , filter_scale=_lowerCAmelCase , )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        __UpperCamelCase : List[str] = random_chars(32 )
        # Base name of the source image, without extension.
        __UpperCamelCase : int = path.split(os.sep )[-1].rsplit("." , 1 )[0]
        __UpperCamelCase : Dict = F'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
        cva.imwrite(F'''{file_root}.jpg''' , _lowerCAmelCase , [cva.IMWRITE_JPEG_QUALITY, 85] )
        print(F'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
        __UpperCamelCase : str = []
        # Convert corner-format boxes back to YOLO (center x, center y, w, h).
        for anno in new_annos:
            __UpperCamelCase : List[str] = anno[3] - anno[1]
            __UpperCamelCase : Union[str, Any] = anno[4] - anno[2]
            __UpperCamelCase : List[str] = anno[1] + width / 2
            __UpperCamelCase : str = anno[2] + height / 2
            __UpperCamelCase : List[str] = F'''{anno[0]} {x_center} {y_center} {width} {height}'''
            annos_list.append(_lowerCAmelCase )
        with open(F'''{file_root}.txt''' , "w" ) as outfile:
            outfile.write("\n".join(line for line in annos_list ) )
def UpperCAmelCase_(label_dir: str, img_dir: str):
    """Load a YOLO-format dataset from parallel label/image directories.

    For every `*.txt` label file in `label_dir`, parse its boxes from
    "class cx cy w h" (normalized center format) into corner format
    [class, xmin, ymin, xmax, ymax] and pair them with `<name>.jpg` in
    `img_dir`. Label files with no boxes are skipped.

    Returns:
        (img_paths, labels): parallel lists of image paths and box lists.

    Fixes the mangled original, whose two accumulator lists were both bound
    to the same placeholder while the loop appended to the undefined names
    `img_paths` / `labels`, and whose parameters were duplicated (SyntaxError).
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            # Center format -> corner format.
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def UpperCAmelCase_(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
):
    """Build one mosaic image from four source images and remap their boxes.

    Args:
        all_img_list: all image paths.
        all_annos: per-image lists of ``[class, xmin, ymin, xmax, ymax]``
            boxes in relative coordinates.
        idxs: four indices into the two lists above (top-left, top-right,
            bottom-left, bottom-right).
        output_size: (height, width) of the mosaic canvas.
        scale_range: (min, max) for the random split-point scale.
        filter_scale: drop remapped boxes whose width or height is <= this.

    Returns:
        ``(output_img, new_anno, path)`` — the mosaic, the remapped boxes,
        and the path of the first source image (used for naming the output).
    """
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cva.imread(path)
        if i == 0:  # top-left
            img = cva.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cva.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cva.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cva.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]
def UpperCAmelCase_(number_char: int) -> str:
    """Return a random string of ``number_char`` lowercase letters and digits."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
    # Script entry point: build the mosaic dataset, then report completion.
    main()
    print("DONE ✅")
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class lowerCAmelCase_(IterableDataset):
    """Minimal iterable dataset that yields the elements of ``data`` in order."""

    def __init__(self, data):
        # Keep a reference so __iter__ can replay the sequence.
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element
def create_accelerator(even_batches: bool = True) -> Accelerator:
    """Build an Accelerator, asserting the two-process setup this script expects."""
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator
def create_dataloader(
    accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False
) -> DataLoader:
    """Create and prepare a dataloader of ``range(dataset_size)`` tensors.

    When ``iterable`` is True, wrap the data in the iterable dataset defined
    above instead of a map-style TensorDataset.
    """
    if iterable:
        dataset = lowerCAmelCase_(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))
    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)
    return dl
def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    """Prepare a dataloader and check each process observes the expected batch sizes."""
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)
    batch_sizes = [len(batch[0]) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def test_default_ensures_even_batch_sizes():
    """With default even_batches=True, both processes see identical batch sizes."""
    accelerator = create_accelerator()

    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1, 1],
    )

    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 2],
    )
def test_can_disable_even_batches():
    """With even_batches=False, the last process may receive fewer/smaller batches."""
    accelerator = create_accelerator(even_batches=False)

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1],
    )

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 1],
    )
def test_can_join_uneven_inputs():
    """join_uneven_inputs lets processes iterate different numbers of batches safely."""
    accelerator = create_accelerator(even_batches=False)

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)

    accelerator.wait_for_everyone()

    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]
def test_join_raises_warning_for_non_ddp_distributed(accelerator: Accelerator):
    """join_uneven_inputs should warn (not raise) outside of multi-GPU DDP."""
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)
def test_join_can_override_even_batches():
    """even_batches passed to join_uneven_inputs overrides, then restores, the dataloader setting."""
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches

    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    # The original setting must be restored once the context exits.
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches
def test_join_can_override_for_mixed_type_dataloaders():
    """Override applies to batch-sampler dataloaders and skips iterable ones without raising."""
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with warnings.catch_warnings():
        warnings.filterwarnings('ignore')
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError

    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    """Overriding even_batches with only iterable dataloaders present should warn."""
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)

    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)
def main():
    """Run the even_batches test suite under an Accelerator launch."""
    accelerator = create_accelerator()

    accelerator.print('Test that even_batches variable ensures uniform batches across processes')
    test_default_ensures_even_batch_sizes()

    accelerator.print('Run tests with even_batches disabled')
    test_can_disable_even_batches()

    accelerator.print('Test joining uneven inputs')
    test_can_join_uneven_inputs()

    accelerator.print('Test overriding even_batches when joining uneven inputs')
    test_join_can_override_even_batches()

    accelerator.print('Test overriding even_batches for mixed dataloader types')
    test_join_can_override_for_mixed_type_dataloaders()

    accelerator.print('Test overriding even_batches raises a warning for iterable dataloaders')
    test_join_raises_warning_for_iterable_when_overriding_even_batches()

    accelerator.print('Test join with non DDP distributed raises warning')
    # Temporarily pretend we're on FSDP to exercise the warning path, then restore.
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state


if __name__ == "__main__":
    main()
| 150 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def test_mockfs_registered(mockfs):
    """Requesting the mockfs fixture registers the 'mock' protocol with fsspec."""
    # NOTE(review): the parameter must match the fixture name in conftest — confirm.
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_non_mockfs_not_registered():
    """Without the mockfs fixture, only the built-in protocols are registered."""
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
    """extract_path_from_uri strips the s3 scheme but leaves local paths untouched."""
    mock_bucket = "mock-s3-bucket"
    dataset_path = F's3://{mock_bucket}'
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
    """A mock remote filesystem is detected as remote; the local 'file' fs is not."""
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(
    compression_fs_class, gz_file, bza_file, lza_file, zstd_file, xz_file, text_file
):
    """Each compression filesystem exposes its archive as a single decompressed file.

    NOTE(review): the fixture parameter names must match conftest — confirm.
    """
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = F'for \'{compression_fs_class.protocol}\' compression protocol, '
        if compression_fs_class.protocol == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    # The archive member is the file name without its compression extension.
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    """Chained fsspec URLs can address a member file inside an archive."""
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = F'{protocol}://{member_file_path}::{compressed_file_path}'
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    """HfFileSystem lists and reads files of a (private) Hub dataset repo.

    NOTE(review): the fixture parameter names must match conftest — confirm.
    """
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()
def test_fs_overwrites():
    """Re-registering an existing protocol warns when datasets.filesystems is reloaded."""
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == F'''A filesystem protocol was already set for {protocol} and will be overwritten.'''
    )
| 281 | 0 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
lowerCAmelCase = get_tests_dir('''fixtures''')
class ImageProcessorUtilTester(unittest.TestCase):
    """Offline/caching behavior of image-processor loading."""

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit')

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit')
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            'https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json')

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained('hf-internal-testing/stable-diffusion-all-variants')

        config = AutoImageProcessor.from_pretrained(
            'hf-internal-testing/stable-diffusion-all-variants', subfolder='feature_extractor')
        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    """Round-trip tests for pushing image processors to the (staging) Hub."""

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup of the repos the tests below create.
        try:
            delete_repo(token=cls._token, repo_id='test-image-processor')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='valid_org/test-image-processor-org')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='test-dynamic-image-processor')
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(lowerCAmelCase)
        image_processor.push_to_hub('test-image-processor', use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor')
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id='test-image-processor')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id='test-image-processor', push_to_hub=True, use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor')
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(lowerCAmelCase)
        image_processor.push_to_hub('valid_org/test-image-processor', use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained('valid_org/test-image-processor')
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-image-processor')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id='valid_org/test-image-processor-org', push_to_hub=True, use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained('valid_org/test-image-processor-org')
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(lowerCAmelCase)

        image_processor.push_to_hub('test-dynamic-image-processor', use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map, {'AutoImageProcessor': 'custom_image_processing.CustomImageProcessor'}, )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f'{USER}/test-dynamic-image-processor', trust_remote_code=True)
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, 'CustomImageProcessor')
| 304 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
# Marker inserted before lines that mention constructs needing manual review.
HIGHLIGHT_MESSAGE_PRE = '''<<<<<<< This should probably be modified because it mentions: '''
# Marker inserted after such lines, closing the conflict-style highlight.
HIGHLIGHT_MESSAGE_POST = '''=======
>>>>>>>
'''

# TFDS constructs that have no automatic conversion and need manual attention.
TO_HIGHLIGHT = [
    '''TextEncoderConfig''',
    '''ByteTextEncoder''',
    '''SubwordTextEncoder''',
    '''encoder_config''',
    '''maybe_build_from_corpus''',
    '''manual_dir''',
]

TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (R'''tfds\.core''', R'''datasets'''),
    (R'''tf\.io\.gfile\.GFile''', R'''open'''),
    (R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
    (R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
    (R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
    (R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
    (R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
    (R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
    (R'''tfds\.''', R'''datasets.'''),
    (R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
    (R'''self\.builder_config''', R'''self.config'''),
]
def _lowerCamelCase(args):
    """Factory for the convert command, wired into argparse via set_defaults.

    Args:
        args: parsed CLI namespace providing ``tfds_path`` and ``datasets_directory``.
    """
    return A(args.tfds_path, args.datasets_directory)
class A(BaseDatasetsCLICommand):
    """CLI command converting TFDS dataset scripts to HuggingFace Datasets scripts."""

    @staticmethod
    def register_subcommand(parser):
        """Register the 'convert' subcommand and its arguments on *parser*."""
        train_parser = parser.add_parser(
            'convert', help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.', )
        train_parser.add_argument(
            '--tfds_path', type=str, required=True,
            help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.', )
        train_parser.add_argument(
            '--datasets_directory', type=str, required=True, help='Path to the HuggingFace Datasets folder.')
        train_parser.set_defaults(func=_lowerCamelCase)

    def __init__(self, tfds_path, datasets_directory, *args):
        self._logger = get_logger('datasets-cli/converting')
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        """Convert every eligible .py file under tfds_path into datasets style."""
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError('--tfds_path is neither a directory nor a file. Please check path.')

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f'Converting datasets from {abs_tfds_path} to {abs_datasets_path}')
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f'Looking at file {f_name}')
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('Skipping file')
                continue

            with open(input_file, encoding='utf-8') as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = 'import datasets\n'
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ''
                    continue
                elif "from absl import logging" in out_line:
                    out_line = 'from datasets import logging\n'
                elif "getLogger" in out_line:
                    out_line = out_line.replace('getLogger', 'get_logger')
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_highlight = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_highlight) + '\n')
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r'from\stensorflow_datasets.*import\s([^\.\r\n]+)', out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(','))
                    out_line = 'from . import ' + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f'Error converting {out_line.strip()}')

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace('.py', '')
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f'Adding directory {output_dir}')
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, 'w', encoding='utf-8') as f:
                f.writelines(out_lines)
            self._logger.info(f'Converted in {output_file}')

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace('.py', '')]
                self._logger.info(f'Moving {dest_folder} to {utils_file}')
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f'Cannot find destination folder for {utils_file}. Please copy manually.')

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.')
| 304 | 1 |
def lowerCAmelCase__(num: int) -> bool:
    """Return True when ``num`` is a non-negative palindromic integer."""
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    # Build the reversed decimal representation digit by digit.
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 209 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

# File name of the SentencePiece model inside a checkpoint directory.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

# Remote vocabulary locations per published GPT-SW3 checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

# Maximum model input sizes (all checkpoints use a 2048-token context).
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2_048,
    "AI-Sweden/gpt-sw3-350m": 2_048,
    "AI-Sweden/gpt-sw3-1.6b": 2_048,
    "AI-Sweden/gpt-sw3-6.7b": 2_048,
    "AI-Sweden/gpt-sw3-20b": 2_048,
}
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ["""input_ids""", """attention_mask"""]
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
'''simple docstring'''
lowerCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
lowerCamelCase__ = kwargs.get('''name_or_path''' )
if name_or_path is None:
logger.warning(
'''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'''
''' you are testing the model, this can safely be ignored''' )
lowerCamelCase__ = '''None'''
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
lowerCamelCase__ = '''<|endoftext|>''' if eos_token is None else eos_token
lowerCamelCase__ = '''<unk>''' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
lowerCamelCase__ = unk_token if pad_token is None else pad_token
lowerCamelCase__ = eos_token if bos_token is None else bos_token
else:
lowerCamelCase__ = '''<pad>''' if pad_token is None else pad_token
lowerCamelCase__ = '''<s>''' if bos_token is None else bos_token
super().__init__(
do_lower_case=__lowerCAmelCase , remove_space=__lowerCAmelCase , keep_accents=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , )
lowerCamelCase__ = do_lower_case
lowerCamelCase__ = remove_space
lowerCamelCase__ = keep_accents
lowerCamelCase__ = vocab_file
lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCAmelCase )
# Used for whitespace normalization in input texts
# fmt : off
lowerCamelCase__ = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', ''''''}
# fmt : on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
lowerCamelCase__ = re.compile(
F'[{"".join(map(__lowerCAmelCase , list(range(0 , 9 ) ) + list(range(1_1 , 3_2 ) ) + list(range(1_2_7 , 1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]' )
def __getstate__( self ):
'''simple docstring'''
lowerCamelCase__ = self.__dict__.copy()
lowerCamelCase__ = None
return state
def __setstate__( self , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCamelCase__ = {}
lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model )
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = self.non_printing_characters_re.sub('''''' , __lowerCAmelCase )
# Normalize whitespaces
lowerCamelCase__ = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] )
# NFC Unicode normalization
lowerCamelCase__ = unicodedata.normalize('''NFC''' , __lowerCAmelCase )
return text
def __lowerCamelCase ( self , __lowerCAmelCase , **__lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = self.preprocess_text(__lowerCAmelCase )
return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
return self.sp_model.PieceToId(__lowerCAmelCase )
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
return self.sp_model.IdToPiece(__lowerCAmelCase )
@staticmethod
def __lowerCamelCase ( __lowerCAmelCase ):
'''simple docstring'''
return out_string
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = []
lowerCamelCase__ = ''''''
lowerCamelCase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowerCAmelCase ) + token
lowerCamelCase__ = True
lowerCamelCase__ = []
else:
current_sub_tokens.append(__lowerCAmelCase )
lowerCamelCase__ = False
out_string += self.sp_model.decode(__lowerCAmelCase )
return out_string
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
'''simple docstring'''
if not os.path.isdir(__lowerCAmelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCamelCase__ = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCAmelCase , '''wb''' ) as fi:
lowerCamelCase__ = self.sp_model.serialized_model_proto()
fi.write(__lowerCAmelCase )
return (out_vocab_file,)
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = False ):
'''simple docstring'''
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowerCamelCase__ = self.preprocess_text(__lowerCAmelCase )
lowerCamelCase__ = self.sp_model.encode(__lowerCAmelCase )
else:
lowerCamelCase__ = [self.preprocess_text(__lowerCAmelCase ) for t in text]
lowerCamelCase__ = self.sp_model.encode(__lowerCAmelCase )
if return_tensors is True or return_tensors == "pt":
lowerCamelCase__ = torch.tensor(__lowerCAmelCase )
return token_ids
    def __lowerCamelCase ( self , __lowerCAmelCase ):
        """Fast decode: convert a list of ids back to text with the SentencePiece model."""
        return self.sp_model.decode(__lowerCAmelCase )
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = [F'User: {text}' if is_user else F'Bot: {text}' for is_user, text in conversation.iter_texts()]
lowerCamelCase__ = (
F'{self.eos_token}{self.bos_token}' + F'{self.bos_token}'.join(__lowerCAmelCase ) + F'{self.bos_token}Bot:'
)
return self.encode(text=__lowerCAmelCase )
| 209 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=snake_case_ )
class UpperCAmelCase ( snake_case_ ):
    # NOTE(review): obfuscation renamed every field to `_lowercase` (later
    # declarations shadow earlier ones) and the property below reads
    # `self.text_column`, which no longer exists under that name.
    # Task identifier; serialized even when left at its default.
    _lowercase: str = field(default='''language-modeling''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    # Expected input schema: a single free-text string column.
    _lowercase: ClassVar[Features] = Features({'''text''': Value('''string''' )} )
    # Language modeling defines no label schema.
    _lowercase: ClassVar[Features] = Features({} )
    _lowercase: str = "text"
    @property
    def lowercase__ ( self : int ) -> Dict[str, str]:
        # Maps the dataset's text column onto the canonical "text" key.
        return {self.text_column: "text"}
| 371 |
'''simple docstring'''
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def UpperCamelCase__ ( default=None , metadata=None ):
    """dataclasses ``field`` helper for list-valued defaults.

    Mutable defaults must go through ``default_factory``; the mangled source
    declared both parameters with the same name (a SyntaxError) and forwarded
    the wrong value as ``metadata``.
    """
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class UpperCAmelCase :
    # CLI arguments for the benchmark plotting script.
    # NOTE(review): obfuscation renamed every field to `_lowercase` (only the
    # last declaration would survive) and every default to `snake_case_`
    # (undefined). Judging by the help strings, the original fields were
    # csv_file, plot_along_batch, is_time, no_log_scale, is_train,
    # figure_png_file and short_model_names — confirm before running.
    _lowercase: str = field(
        metadata={'''help''': '''The csv file to plot.'''} , )
    _lowercase: bool = field(
        default=snake_case_ , metadata={'''help''': '''Whether to plot along batch size or sequence length. Defaults to sequence length.'''} , )
    _lowercase: bool = field(
        default=snake_case_ , metadata={'''help''': '''Whether the csv file has time results or memory results. Defaults to memory results.'''} , )
    _lowercase: bool = field(
        default=snake_case_ , metadata={'''help''': '''Disable logarithmic scale when plotting'''} , )
    _lowercase: bool = field(
        default=snake_case_ , metadata={
            '''help''': '''Whether the csv file has training results or inference results. Defaults to inference results.'''
        } , )
    _lowercase: Optional[str] = field(
        default=snake_case_ , metadata={'''help''': '''Filename under which the plot will be saved. If unused no plot is saved.'''} , )
    _lowercase: Optional[List[str]] = list_field(
        default=snake_case_ , metadata={'''help''': '''List of model names that are used instead of the ones in the csv file.'''} )
def UpperCamelCase__ ( lowerCAmelCase ):
    """Return True when the value parses as an int, False otherwise."""
    try:
        int(lowerCAmelCase )
    except ValueError:
        return False
    return True
def UpperCamelCase__ ( lowerCAmelCase ):
    """Return True when the value parses as a float, False otherwise."""
    try:
        float(lowerCAmelCase )
    except ValueError:
        return False
    return True
class UpperCAmelCase :
    """Load benchmark results from a csv file and plot them with matplotlib.

    The mangled source collapsed every local and attribute onto
    ``_lowerCAmelCase`` (so ``reader``, ``self.args``, ``self.result_dict``
    etc. were never bound — NameError on first use), dropped the keyed insert
    into the result dict that ``plot`` reads back, and referenced the
    nonexistent ``np.floataa`` (``np.float32`` garbled). The keyword caller
    ``Plot(args=...)`` below grounds the ``args`` parameter name.
    """

    def __init__( self : List[str] , args : Union[str, Any] ) -> int:
        """Parse args.csv_file into self.result_dict:
        model -> {"bsz": [...], "seq_len": [...], "result": {(bsz, seq_len): value}}."""
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
        with open(self.args.csv_file , newline="""""" ) as csv_file:
            reader = csv.DictReader(csv_file )
            for row in reader:
                model_name = row["""model"""]
                self.result_dict[model_name]["bsz"].append(int(row["""batch_size"""] ) )
                self.result_dict[model_name]["seq_len"].append(int(row["""sequence_length"""] ) )
                if can_convert_to_int(row["""result"""] ):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["""batch_size"""] ), int(row["""sequence_length"""] ))
                    ] = int(row["""result"""] )
                elif can_convert_to_float(row["""result"""] ):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["""batch_size"""] ), int(row["""sequence_length"""] ))
                    ] = float(row["""result"""] )

    def lowercase__ ( self : Dict ) -> str:
        """Render the scatter/line plot; save to args.figure_png_file or show()."""
        fig, ax = plt.subplots()
        title_str = """Time usage""" if self.args.is_time else """Memory usage"""
        title_str = title_str + """ for training""" if self.args.is_train else title_str + """ for inference"""
        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("""log""" )
            ax.set_yscale("""log""" )
            for axis in [ax.xaxis, ax.yaxis]:
                axis.set_major_formatter(ScalarFormatter() )
        for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
            batch_sizes = sorted(set(self.result_dict[model_name]["""bsz"""] ) )
            sequence_lengths = sorted(set(self.result_dict[model_name]["""seq_len"""] ) )
            results = self.result_dict[model_name]["""result"""]
            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )
            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )
            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    # batch sizes are integral; assumes int dtype — TODO confirm
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=int , )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.float32 , )
                (x_axis_label, inner_loop_label) = (
                    ("""batch_size""", """len""") if self.args.plot_along_batch else ("""in #tokens""", """bsz""")
                )
                # Trim the x axis to the points that actually had results.
                x_axis_array = np.asarray(x_axis_array , int )[: len(y_axis_array )]
                plt.scatter(
                    x_axis_array , y_axis_array , label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}" )
                plt.plot(x_axis_array , y_axis_array , """--""" )
            title_str += f" {label_model_name} vs."
        title_str = title_str[:-4]
        y_axis_label = """Time in s""" if self.args.is_time else """Memory in MB"""
        # plot
        plt.title(title_str )
        plt.xlabel(x_axis_label )
        plt.ylabel(y_axis_label )
        plt.legend()
        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file )
        else:
            plt.show()
def UpperCamelCase__ ( ):
    """Script entry point: parse PlotArguments from the CLI and render the plot."""
    # NOTE(review): obfuscation broke every reference below — `lowerCAmelCase`,
    # `parser`, `Plot`, `plot` and the guard's `main` are all undefined as
    # written. Restore against the original transformers plot_csv_file.py.
    _lowerCAmelCase = HfArgumentParser(lowerCAmelCase )
    _lowerCAmelCase = parser.parse_args_into_dataclasses()[0]
    _lowerCAmelCase = Plot(args=lowerCAmelCase )
    plot.plot()
if __name__ == "__main__":
    main()
| 220 | 0 |
"""simple docstring"""
import enum
import shutil
import sys
# NOTE(review): obfuscation collapsed TERMINAL_WIDTH (and the unused height)
# and CURSOR_TO_CHAR onto the single name `_a`; the annotated tuple target on
# the first line is also a SyntaxError (`a, b: int = ...` is invalid Python).
_a , _a : int = shutil.get_terminal_size()
# Map of arrow-key directions to their ANSI escape final characters.
_a : Optional[Any] = {'UP': 'A', 'DOWN': 'B', 'RIGHT': 'C', 'LEFT': 'D'}
class __A ( enum.Enum ):
    # NOTE(review): both members were renamed to `_UpperCamelCase`, which
    # enum rejects at class creation (duplicate key). Presumably UP = 0 and
    # DOWN = 1, matching CURSOR_TO_CHAR above — confirm and restore.
    _UpperCamelCase : int = 0
    _UpperCamelCase : List[str] = 1
def SCREAMING_SNAKE_CASE ( content , end="" ):
    """Write ``content`` (stringified) followed by ``end`` to stdout and flush.

    The mangled source declared both parameters with the same name (a
    SyntaxError) and referenced the undefined name ``end`` in the body.
    """
    sys.stdout.write(str(content ) + end )
    sys.stdout.flush()
def SCREAMING_SNAKE_CASE ( content , color , end="" ):
    """Write ``content`` wrapped in the ANSI SGR escape for ``color``.

    The mangled source declared three parameters with the same name (a
    SyntaxError); the body already referenced ``color`` and ``content``.
    """
    forceWrite(f"\u001b[{color}m{content}\u001b[0m" , end )
def SCREAMING_SNAKE_CASE ( ) -> int:
    """Return the cursor to the start of the current line (carriage return)."""
    # NOTE(review): `forceWrite` is not defined under that name in this dump —
    # the helper above was renamed by the obfuscation.
    forceWrite("""\r""" )
def SCREAMING_SNAKE_CASE ( num_lines , direction ):
    """Emit the ANSI escape moving the cursor ``num_lines`` positions in
    ``direction`` (a key of CURSOR_TO_CHAR: UP/DOWN/RIGHT/LEFT).

    The mangled source declared both parameters with the same name (a
    SyntaxError); the body already referenced ``num_lines`` and ``direction``.
    """
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}" )
def SCREAMING_SNAKE_CASE ( ) -> List[Any]:
    """Blank out the current terminal line, then return to its start."""
    # NOTE(review): TERMINAL_WIDTH and reset_cursor were renamed by the
    # obfuscation (see `_a` above); these names are unresolved as written.
    forceWrite(""" """ * TERMINAL_WIDTH )
    reset_cursor()
def SCREAMING_SNAKE_CASE ( ) -> Any:
    """Draw a horizontal rule of dashes across the full terminal width."""
    reset_cursor()
    forceWrite("""-""" * TERMINAL_WIDTH )
| 44 | """simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def SCREAMING_SNAKE_CASE ( x ):
    """Entropy of each row of logits ``x`` (over dim 1):
    log(sum exp(x)) - sum(x * exp(x)) / sum(exp(x)).

    The mangled source lost the ``exp_x``/``A``/``B`` locals, so the final
    expression referenced undefined names and took the log of the input
    instead of the partition sum.
    """
    exp_x = torch.exp(x )
    A = torch.sum(exp_x , dim=1 )  # sum of exp(x_i)
    B = torch.sum(x * exp_x , dim=1 )  # sum of x_i * exp(x_i)
    return torch.log(A ) - B / A
class __A ( nn.Module ):
    """DeeBERT encoder: a stack of BertLayers where each layer also feeds a
    "highway" early-exit head; at inference a confident head raises
    HighwayException to stop early.

    NOTE(review): obfuscation collapsed all locals/attributes onto
    `_lowerCAmelCase` and renamed every method to `__A` (later defs shadow
    earlier ones), and the forward signature declares duplicate `a__`
    parameters (a SyntaxError). Restore against the original DeeBERT example
    before running; comments below describe the intended structure.
    """
    def __init__( self , a__ ):
        super().__init__()
        # Intended attributes: output_attentions / output_hidden_states flags,
        # self.layer (BertLayers), self.highway (BertHighway heads), and
        # self.early_exit_entropy thresholds (disabled at -1).
        _lowerCAmelCase : int = config.output_attentions
        _lowerCAmelCase : Any = config.output_hidden_states
        _lowerCAmelCase : List[Any] = nn.ModuleList([BertLayer(a__ ) for _ in range(config.num_hidden_layers )] )
        _lowerCAmelCase : Any = nn.ModuleList([BertHighway(a__ ) for _ in range(config.num_hidden_layers )] )
        _lowerCAmelCase : str = [-1 for _ in range(config.num_hidden_layers )]
    def __A ( self , a__ ):
        # Set a scalar threshold for every layer, or a per-layer list.
        if (type(a__ ) is float) or (type(a__ ) is int):
            for i in range(len(self.early_exit_entropy ) ):
                _lowerCAmelCase : Tuple = x
        else:
            _lowerCAmelCase : Optional[int] = x
    def __A ( self , a__ ):
        # Copy the main model's pooler weights into every highway pooler.
        _lowerCAmelCase : Optional[int] = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name] )
    def __A ( self , a__ , a__=None , a__=None , a__=None , a__=None , ):
        # Forward pass: run each layer, evaluate its highway head, and raise
        # HighwayException when the head's entropy clears the threshold.
        _lowerCAmelCase : Any = ()
        _lowerCAmelCase : Optional[int] = ()
        _lowerCAmelCase : List[Any] = ()
        for i, layer_module in enumerate(self.layer ):
            if self.output_hidden_states:
                _lowerCAmelCase : str = all_hidden_states + (hidden_states,)
            _lowerCAmelCase : List[str] = layer_module(
                a__ , a__ , head_mask[i] , a__ , a__ )
            _lowerCAmelCase : Union[str, Any] = layer_outputs[0]
            if self.output_attentions:
                _lowerCAmelCase : Dict = all_attentions + (layer_outputs[1],)
            _lowerCAmelCase : Optional[int] = (hidden_states,)
            if self.output_hidden_states:
                _lowerCAmelCase : Union[str, Any] = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                _lowerCAmelCase : Optional[int] = current_outputs + (all_attentions,)
            _lowerCAmelCase : Optional[Any] = self.highway[i](a__ )
            # logits, pooled_output
            if not self.training:
                _lowerCAmelCase : Tuple = highway_exit[0]
                _lowerCAmelCase : Any = entropy(a__ )
                _lowerCAmelCase : Optional[Any] = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
                _lowerCAmelCase : Union[str, Any] = all_highway_exits + (highway_exit,)
                if highway_entropy < self.early_exit_entropy[i]:
                    _lowerCAmelCase : List[str] = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(a__ , i + 1 )
            else:
                _lowerCAmelCase : Dict = all_highway_exits + (highway_exit,)
        # Add last layer
        if self.output_hidden_states:
            _lowerCAmelCase : List[Any] = all_hidden_states + (hidden_states,)
        _lowerCAmelCase : List[Any] = (hidden_states,)
        if self.output_hidden_states:
            _lowerCAmelCase : List[str] = outputs + (all_hidden_states,)
        if self.output_attentions:
            _lowerCAmelCase : Any = outputs + (all_attentions,)
        _lowerCAmelCase : Optional[int] = outputs + (all_highway_exits,)
        return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). " , SCREAMING_SNAKE_CASE_ , )
class __A ( SCREAMING_SNAKE_CASE_ ):
    """DeeBERT base model: BertEmbeddings + DeeBertEncoder + BertPooler.

    NOTE(review): obfuscation collapsed locals/attributes onto
    `_lowerCAmelCase`, renamed every method to `__A` (later defs shadow
    earlier ones) and introduced duplicate `a__` parameters in the forward
    signature (a SyntaxError). Restore against the original DeeBERT example.
    """
    def __init__( self , a__ ):
        super().__init__(a__ )
        _lowerCAmelCase : Any = config
        _lowerCAmelCase : Tuple = BertEmbeddings(a__ )
        _lowerCAmelCase : Tuple = DeeBertEncoder(a__ )
        _lowerCAmelCase : List[str] = BertPooler(a__ )
        self.init_weights()
    def __A ( self ):
        # Copy pooler weights into every highway head.
        self.encoder.init_highway_pooler(self.pooler )
    def __A ( self ):
        return self.embeddings.word_embeddings
    def __A ( self , a__ ):
        _lowerCAmelCase : Dict = value
    def __A ( self , a__ ):
        # Prune attention heads, keyed by layer index.
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(a__ )
    @add_start_docstrings_to_model_forward(a__ )
    def __A ( self , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
        elif input_ids is not None:
            _lowerCAmelCase : Any = input_ids.size()
        elif inputs_embeds is not None:
            _lowerCAmelCase : List[str] = inputs_embeds.size()[:-1]
        else:
            raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
        _lowerCAmelCase : str = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            _lowerCAmelCase : List[Any] = torch.ones(a__ , device=a__ )
        if encoder_attention_mask is None:
            _lowerCAmelCase : Optional[Any] = torch.ones(a__ , device=a__ )
        if token_type_ids is None:
            _lowerCAmelCase : Dict = torch.zeros(a__ , dtype=torch.long , device=a__ )
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        _lowerCAmelCase : torch.Tensor = self.get_extended_attention_mask(a__ , a__ , a__ )
        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            _lowerCAmelCase : Dict = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            _lowerCAmelCase : Tuple = encoder_attention_mask[:, None, None, :]
        _lowerCAmelCase : Union[str, Any] = encoder_extended_attention_mask.to(
            dtype=next(self.parameters() ).dtype ) # fp16 compatibility
        _lowerCAmelCase : Optional[Any] = (1.0 - encoder_extended_attention_mask) * -1_0_0_0_0.0
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        _lowerCAmelCase : Optional[int] = self.get_head_mask(a__ , self.config.num_hidden_layers )
        _lowerCAmelCase : Dict = self.embeddings(
            input_ids=a__ , position_ids=a__ , token_type_ids=a__ , inputs_embeds=a__ )
        _lowerCAmelCase : Union[str, Any] = self.encoder(
            a__ , attention_mask=a__ , head_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , )
        _lowerCAmelCase : Dict = encoder_outputs[0]
        _lowerCAmelCase : Union[str, Any] = self.pooler(a__ )
        _lowerCAmelCase : Dict = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ] # add hidden_states and attentions if they are here
        return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class __A ( SCREAMING_SNAKE_CASE_ ):
    """Raised by the DeeBERT encoder to unwind out of the layer loop when an
    early-exit ("highway") head is confident enough; carries that head's
    outputs and the (1-based) layer it exited from.

    The mangled source declared both __init__ parameters with the same name
    (a SyntaxError) and never stored them on self, although the forward pass
    below reads ``e.message`` and ``e.exit_layer``.
    """

    def __init__( self , message , exit_layer ):
        self.message = message  # the highway head's outputs
        self.exit_layer = exit_layer  # start from 1!
class __A ( nn.Module ):
    """A single early-exit ("highway") head: BertPooler + dropout + linear
    classifier applied to one intermediate hidden state.

    The mangled source assigned the submodules to throwaway locals (so
    ``self.pooler`` etc. never existed) and named the forward method ``__A``;
    nn.Module dispatch (``self.highway[i](...)`` in the encoder) requires it
    to be ``forward``.
    """

    def __init__( self , config ):
        super().__init__()
        self.pooler = BertPooler(config )
        self.dropout = nn.Dropout(config.hidden_dropout_prob )
        self.classifier = nn.Linear(config.hidden_size , config.num_labels )

    def forward( self , encoder_outputs ):
        """encoder_outputs: tuple whose element [0] is the hidden states."""
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input )
        # "return" pooler_output
        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output
        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output )
        logits = self.classifier(pooled_output )
        return logits, pooled_output
@add_start_docstrings(
    "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n    also takes care of multi-layer training. " , SCREAMING_SNAKE_CASE_ , )
class __A ( SCREAMING_SNAKE_CASE_ ):
    """DeeBERT sequence classifier: final-layer head plus per-layer highway
    losses; catches HighwayException to honor inference-time early exits.

    NOTE(review): obfuscation collapsed locals/attributes onto
    `_lowerCAmelCase` (so `self.bert`, `outputs`, `loss`, ... were never
    bound), renamed the forward method to `__A`, and duplicated the `a__`
    parameter names (a SyntaxError). Restore before running.
    """
    def __init__( self , a__ ):
        super().__init__(a__ )
        _lowerCAmelCase : List[str] = config.num_labels
        _lowerCAmelCase : Optional[Any] = config.num_hidden_layers
        _lowerCAmelCase : str = DeeBertModel(a__ )
        _lowerCAmelCase : Tuple = nn.Dropout(config.hidden_dropout_prob )
        _lowerCAmelCase : List[Any] = nn.Linear(config.hidden_size , self.config.num_labels )
        self.init_weights()
    @add_start_docstrings_to_model_forward(a__ )
    def __A ( self , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=-1 , a__=False , ):
        _lowerCAmelCase : Dict = self.num_layers
        try:
            _lowerCAmelCase : str = self.bert(
                a__ , attention_mask=a__ , token_type_ids=a__ , position_ids=a__ , head_mask=a__ , inputs_embeds=a__ , )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            _lowerCAmelCase : Any = outputs[1]
            _lowerCAmelCase : Optional[int] = self.dropout(a__ )
            _lowerCAmelCase : List[str] = self.classifier(a__ )
            _lowerCAmelCase : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
        except HighwayException as e:
            # An early-exit head fired: use its outputs instead.
            _lowerCAmelCase : Tuple = e.message
            _lowerCAmelCase : int = e.exit_layer
            _lowerCAmelCase : Union[str, Any] = outputs[0]
        if not self.training:
            _lowerCAmelCase : Tuple = entropy(a__ )
            _lowerCAmelCase : Optional[int] = []
            _lowerCAmelCase : Optional[Any] = []
        if labels is not None:
            # Regression (num_labels == 1) uses MSE; classification uses CE.
            if self.num_labels == 1:
                # We are doing regression
                _lowerCAmelCase : Tuple = MSELoss()
                _lowerCAmelCase : int = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
            else:
                _lowerCAmelCase : Any = CrossEntropyLoss()
                _lowerCAmelCase : Optional[int] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            # work with highway exits
            _lowerCAmelCase : Optional[Any] = []
            for highway_exit in outputs[-1]:
                _lowerCAmelCase : Dict = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(a__ )
                    highway_entropy.append(highway_exit[2] )
                if self.num_labels == 1:
                    # We are doing regression
                    _lowerCAmelCase : List[Any] = MSELoss()
                    _lowerCAmelCase : int = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
                else:
                    _lowerCAmelCase : Optional[int] = CrossEntropyLoss()
                    _lowerCAmelCase : List[Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
                highway_losses.append(a__ )
            if train_highway:
                _lowerCAmelCase : List[Any] = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                _lowerCAmelCase : Any = (loss,) + outputs
        if not self.training:
            _lowerCAmelCase : Dict = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                _lowerCAmelCase : Dict = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                ) # use the highway of the last layer
        return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 44 | 1 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def lowerCamelCase__ ( example ):
    """Tokenize one dataset example and record its chars-per-token ratio.

    The mangled source referenced the undefined name ``example``, passed the
    example itself as the ``truncation`` flag, and never stored the computed
    values in ``output`` (all locals were collapsed onto one name).
    NOTE(review): key name "ratio_char_token" restored from the original
    codeparrot pretokenizing script — confirm against downstream consumers.
    """
    output = {}
    output["""input_ids"""] = tokenizer(example["""content"""] , truncation=False )["""input_ids"""]
    output["""ratio_char_token"""] = len(example["""content"""] ) / len(output["""input_ids"""] )
    return output
# NOTE(review): obfuscation collapsed every binding onto `UpperCAmelCase_`,
# so `parser`, `args`, `tokenizer`, `t_start` and `ds` referenced below are
# all undefined as written; restore the distinct names before running.
UpperCAmelCase_ = HfArgumentParser(PretokenizationArguments)
UpperCAmelCase_ = parser.parse_args()
if args.num_workers is None:
    UpperCAmelCase_ = multiprocessing.cpu_count()
UpperCAmelCase_ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
UpperCAmelCase_ = time.time()
UpperCAmelCase_ = load_dataset(args.dataset_name, split='train')
print(f"""Dataset loaded in {time.time()-t_start:.2f}s""")
UpperCAmelCase_ = time.time()
# Tokenize in parallel and drop every column except the tokenized output.
UpperCAmelCase_ = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        'repo_name',
        'path',
        'copies',
        'size',
        'content',
        'license',
        'hash',
        'line_mean',
        'line_max',
        'alpha_frac',
        'autogenerated',
    ],
)
print(f"""Dataset tokenized in {time.time()-t_start:.2f}s""")
UpperCAmelCase_ = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
| 363 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase)
class lowerCamelCase__( __lowerCamelCase):
    """Zero-shot object detection pipeline (OWL-ViT style): scores candidate
    text labels against an image and returns boxes above a threshold.

    NOTE(review): obfuscation collapsed most locals onto `__lowerCamelCase`,
    so names such as `results`, `postprocess_params`, `label`, `xmin`...
    are undefined as written; restore before running.
    """
    def __init__( self: str , **UpperCamelCase_: int ):
        super().__init__(**UpperCamelCase_ )
        if self.framework == "tf":
            raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
        requires_backends(self , """vision""" )
        self.check_model_type(UpperCamelCase_ )
    def __call__( self: Union[str, Any] , UpperCamelCase_: Union[str, "Image.Image", List[Dict[str, Any]]] , UpperCamelCase_: Union[str, List[str]] = None , **UpperCamelCase_: List[str] , ):
        # Accept either (image, candidate_labels) or a list of dicts with both.
        if "text_queries" in kwargs:
            __lowerCamelCase = kwargs.pop("""text_queries""" )
        if isinstance(UpperCamelCase_ , (str, Image.Image) ):
            __lowerCamelCase = {"""image""": image, """candidate_labels""": candidate_labels}
        else:
            __lowerCamelCase = image
        __lowerCamelCase = super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
        return results
    def lowerCAmelCase__ ( self: List[str] , **UpperCamelCase_: Dict ):
        # Split kwargs into preprocess/forward/postprocess parameter dicts.
        __lowerCamelCase = {}
        if "threshold" in kwargs:
            __lowerCamelCase = kwargs["""threshold"""]
        if "top_k" in kwargs:
            __lowerCamelCase = kwargs["""top_k"""]
        return {}, {}, postprocess_params
    def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Optional[Any] ):
        # Yield one model input per candidate label, flagging the last.
        __lowerCamelCase = load_image(inputs["""image"""] )
        __lowerCamelCase = inputs["""candidate_labels"""]
        if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
            __lowerCamelCase = candidate_labels.split(""",""" )
        __lowerCamelCase = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
        for i, candidate_label in enumerate(UpperCamelCase_ ):
            __lowerCamelCase = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework )
            __lowerCamelCase = self.image_processor(UpperCamelCase_ , return_tensors=self.framework )
            yield {
                "is_last": i == len(UpperCamelCase_ ) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Tuple ):
        # Run the model, forwarding bookkeeping keys to postprocess.
        __lowerCamelCase = model_inputs.pop("""target_size""" )
        __lowerCamelCase = model_inputs.pop("""candidate_label""" )
        __lowerCamelCase = model_inputs.pop("""is_last""" )
        __lowerCamelCase = self.model(**UpperCamelCase_ )
        __lowerCamelCase = {"""target_size""": target_size, """candidate_label""": candidate_label, """is_last""": is_last, **outputs}
        return model_outputs
    def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: Dict=0.1 , UpperCamelCase_: Union[str, Any]=None ):
        # Convert raw detections into sorted {score, label, box} dicts.
        __lowerCamelCase = []
        for model_output in model_outputs:
            __lowerCamelCase = model_output["""candidate_label"""]
            __lowerCamelCase = BaseModelOutput(UpperCamelCase_ )
            __lowerCamelCase = self.image_processor.post_process_object_detection(
                outputs=UpperCamelCase_ , threshold=UpperCamelCase_ , target_sizes=model_output["""target_size"""] )[0]
            for index in outputs["scores"].nonzero():
                __lowerCamelCase = outputs["""scores"""][index].item()
                __lowerCamelCase = self._get_bounding_box(outputs["""boxes"""][index][0] )
                __lowerCamelCase = {"""score""": score, """label""": label, """box""": box}
                results.append(UpperCamelCase_ )
        __lowerCamelCase = sorted(UpperCamelCase_ , key=lambda UpperCamelCase_ : x["score"] , reverse=UpperCamelCase_ )
        if top_k:
            __lowerCamelCase = results[:top_k]
        return results
    def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: "torch.Tensor" ):
        # Convert an (xmin, ymin, xmax, ymax) tensor to an int dict.
        if self.framework != "pt":
            raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""" )
        __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = box.int().tolist()
        __lowerCamelCase = {
            """xmin""": xmin,
            """ymin""": ymin,
            """xmax""": xmax,
            """ymax""": ymax,
        }
        return bbox
| 29 | 0 |
'''simple docstring'''
from math import factorial
def UpperCamelCase_( snake_case : int = 1_0_0 ):
    """Project Euler 20: return the sum of the decimal digits of n!.

    The mangled source passed the integer argument itself as the ``map``
    function, so every call raised TypeError; each digit character must be
    converted with ``int``.
    """
    return sum(map(int , str(factorial(snake_case ) ) ) )
if __name__ == "__main__":
    # Call the function actually defined above — the obfuscation left the
    # guard calling the undefined name `solution`.
    print(UpperCamelCase_(int(input("Enter the Number: ").strip())))
| 85 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : int = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
_SCREAMING_SNAKE_CASE : int = {
"gpt-neox-20b": 2048,
}
class _snake_case ( lowercase_ ):
    """Fast GPT-NeoX-20B tokenizer (byte-level BPE, tokenizers-backed).

    NOTE(review): obfuscation collapsed the __init__/_build locals onto
    `snake_case_` (so `pre_tok_state`, `pre_tok_class`, `files` and
    `input_ids` are undefined as written) and the __init__ signature declares
    many duplicate `a__` parameters (a SyntaxError). Restore against the
    original transformers GPTNeoXTokenizerFast.
    """
    lowerCAmelCase_ : str = VOCAB_FILES_NAMES
    lowerCAmelCase_ : Tuple = PRETRAINED_VOCAB_FILES_MAP
    lowerCAmelCase_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCAmelCase_ : str = ["input_ids", "attention_mask"]
    def __init__( self , a__=None , a__=None , a__=None , a__="<|endoftext|>" , a__="<|endoftext|>" , a__="<|endoftext|>" , a__=False , **a__ , ) -> Tuple:
        # Rebuild the backend pre-tokenizer when add_prefix_space differs
        # from the serialized tokenizer's setting.
        super().__init__(
            a__ , a__ , tokenizer_file=a__ , unk_token=a__ , bos_token=a__ , eos_token=a__ , add_prefix_space=a__ , **a__ , )
        snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , a__ ) != add_prefix_space:
            snake_case_ = getattr(a__ , pre_tok_state.pop("type" ) )
            snake_case_ = add_prefix_space
            snake_case_ = pre_tok_class(**a__ )
        snake_case_ = add_prefix_space
    def lowerCAmelCase__ ( self , a__ , a__ = None ) -> Tuple[str]:
        # Persist the backend tokenizer model files into a directory.
        snake_case_ = self._tokenizer.model.save(a__ , name=a__ )
        return tuple(a__ )
    def lowerCAmelCase__ ( self , a__ ) -> List[int]:
        # Build conversation input ids: each turn followed by EOS, truncated
        # to the last model_max_length tokens.
        snake_case_ = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(a__ , add_special_tokens=a__ ) + [self.eos_token_id] )
        if len(a__ ) > self.model_max_length:
            snake_case_ = input_ids[-self.model_max_length :]
        return input_ids
| 85 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : str = logging.get_logger(__name__)
lowerCamelCase_ : List[Any] = {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json"""
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class __A ( _SCREAMING_SNAKE_CASE ):
    """Configuration for FNet models (google/fnet-base / fnet-large).

    The mangled source declared every __init__ parameter as ``__A``
    (duplicate parameter names — a SyntaxError), assigned each value to a
    throwaway local ``a`` instead of an attribute, and had dataset-separator
    junk fused onto its last line. Parameter names are restored from the
    right-hand sides of the original assignments.
    """

    # NOTE(review): this class attribute is PretrainedConfig.model_type in the
    # original source; the attribute name was mangled here.
    __lowerCAmelCase = "fnet"

    def __init__( self , vocab_size=3_2000 , hidden_size=768 , num_hidden_layers=12 , intermediate_size=3072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=4 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , use_tpu_fourier_optimizations=False , tpu_short_seq_length=512 , pad_token_id=3 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
"""simple docstring"""
import os
import sys
import unittest
lowerCamelCase_ : Tuple = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowerCamelCase_ : Dict = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""")
lowerCamelCase_ : Dict = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""")
class __A ( unittest.TestCase ):
    """Tests for utils/get_test_info.py mappings on the BERT and BLIP suites.

    NOTE(review): obfuscation collapsed every local onto `a`, so each
    `assertEqual` compares against the undefined-by-then expected dicts, and
    the method arguments are the placeholder `__A` rather than the real
    test-file paths; restore against the original test before running.
    """
    def SCREAMING_SNAKE_CASE ( self ) -> Any:
        # test class -> tester class mapping
        a =get_test_to_tester_mapping(__A )
        a =get_test_to_tester_mapping(__A )
        a ={'''BertModelTest''': '''BertModelTester'''}
        a ={
            '''BlipModelTest''': '''BlipModelTester''',
            '''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
            '''BlipTextModelTest''': '''BlipTextModelTester''',
            '''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
            '''BlipVQAModelTest''': '''BlipVQAModelTester''',
            '''BlipVisionModelTest''': '''BlipVisionModelTester''',
        }
        self.assertEqual(get_test_info.to_json(__A ) , __A )
        self.assertEqual(get_test_info.to_json(__A ) , __A )
    def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
        # model class -> test classes mapping
        a =get_model_to_test_mapping(__A )
        a =get_model_to_test_mapping(__A )
        a ={
            '''BertForMaskedLM''': ['''BertModelTest'''],
            '''BertForMultipleChoice''': ['''BertModelTest'''],
            '''BertForNextSentencePrediction''': ['''BertModelTest'''],
            '''BertForPreTraining''': ['''BertModelTest'''],
            '''BertForQuestionAnswering''': ['''BertModelTest'''],
            '''BertForSequenceClassification''': ['''BertModelTest'''],
            '''BertForTokenClassification''': ['''BertModelTest'''],
            '''BertLMHeadModel''': ['''BertModelTest'''],
            '''BertModel''': ['''BertModelTest'''],
        }
        a ={
            '''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
            '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
            '''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
            '''BlipModel''': ['''BlipModelTest'''],
            '''BlipTextModel''': ['''BlipTextModelTest'''],
            '''BlipVisionModel''': ['''BlipVisionModelTest'''],
        }
        self.assertEqual(get_test_info.to_json(__A ) , __A )
        self.assertEqual(get_test_info.to_json(__A ) , __A )
    def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
        # model class -> tester classes mapping
        a =get_model_to_tester_mapping(__A )
        a =get_model_to_tester_mapping(__A )
        a ={
            '''BertForMaskedLM''': ['''BertModelTester'''],
            '''BertForMultipleChoice''': ['''BertModelTester'''],
            '''BertForNextSentencePrediction''': ['''BertModelTester'''],
            '''BertForPreTraining''': ['''BertModelTester'''],
            '''BertForQuestionAnswering''': ['''BertModelTester'''],
            '''BertForSequenceClassification''': ['''BertModelTester'''],
            '''BertForTokenClassification''': ['''BertModelTester'''],
            '''BertLMHeadModel''': ['''BertModelTester'''],
            '''BertModel''': ['''BertModelTester'''],
        }
        a ={
            '''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
            '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
            '''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
            '''BlipModel''': ['''BlipModelTester'''],
            '''BlipTextModel''': ['''BlipTextModelTester'''],
            '''BlipVisionModel''': ['''BlipVisionModelTester'''],
        }
        self.assertEqual(get_test_info.to_json(__A ) , __A )
        # NOTE(review): dataset-separator junk "| 215 | 0 |" is fused onto
        # the end of the next line in this dump (a SyntaxError) — remove it.
        self.assertEqual(get_test_info.to_json(__A ) , __A ) | 215 | 0 |
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
A__ : str = logging.get_logger(__name__)
enable_full_determinism()
class UpperCAmelCase_ (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
    """Test suite for a small (batch 4, 3-channel, 32x32) unconditional UNet.

    NOTE(review): this file is machine-mangled.  The two `lowerCamelCase`
    class attributes below were originally distinct names (presumably
    `model_class` and `main_input_name`); as written the second assignment
    overwrites the first.  Likewise every `__lowerCamelCase` binding in the
    methods was originally a distinct local (`batch_size`, `num_channels`,
    `sizes`, `noise`, `time_step`, `init_dict`, `inputs_dict`, ...) — the
    later reads of those names raise NameError as-is.  `UNetaDModel` is the
    digit-mangled `UNet2DModel`, and `SCREAMING_SNAKE_CASE_` stands in for
    the original argument (presumably `torch_device`) — confirm upstream.
    """

    lowerCamelCase : Optional[Any] = UNetaDModel
    lowerCamelCase : List[str] = 'sample'

    @property
    def lowercase_ ( self ) -> Tuple:
        # Build a dummy noise sample (batch=4, channels=3, 32x32) plus a timestep.
        __lowerCamelCase : List[str] = 4
        __lowerCamelCase : str = 3
        __lowerCamelCase : List[str] = (32, 32)
        __lowerCamelCase : Tuple = floats_tensor((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : Optional[Any] = torch.tensor([10] ).to(SCREAMING_SNAKE_CASE_ )
        return {"sample": noise, "timestep": time_step}

    @property
    def lowercase_ ( self ) -> Any:
        # Expected input shape: (channels, height, width).
        return (3, 32, 32)

    @property
    def lowercase_ ( self ) -> str:
        # Expected output shape: (channels, height, width).
        return (3, 32, 32)

    def lowercase_ ( self ) -> Dict:
        # Minimal UNet config used to instantiate the model under test,
        # paired with the dummy inputs above.
        __lowerCamelCase : Union[str, Any] = {
            'block_out_channels': (32, 64),
            'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'),
            'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'),
            'attention_head_dim': 3,
            'out_channels': 3,
            'in_channels': 3,
            'layers_per_block': 2,
            'sample_size': 32,
        }
        __lowerCamelCase : Tuple = self.dummy_input
        return init_dict, inputs_dict
class UpperCAmelCase_ (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
    """Test suite for a 4-channel LDM-style UNet, including `from_pretrained`
    round-trips against the `fusing/unet-ldm-dummy-update` checkpoint and an
    accelerate vs. normal loading equivalence check.

    NOTE(review): machine-mangled file — see the sibling class above.  The
    duplicated `lowerCamelCase` / `__lowerCamelCase` bindings were originally
    distinct identifiers (`model`, `loading_info`, `noise`, `time_step`, ...),
    so later reads of those names raise NameError as-is, and
    `SCREAMING_SNAKE_CASE_` stands in for several different original
    arguments (e.g. `torch_device`, `True`, `False`) — confirm upstream.
    """

    lowerCamelCase : Any = UNetaDModel
    lowerCamelCase : List[Any] = 'sample'

    @property
    def lowercase_ ( self ) -> List[str]:
        # Dummy (batch=4, channels=4, 32x32) sample plus a timestep.
        __lowerCamelCase : Dict = 4
        __lowerCamelCase : int = 4
        __lowerCamelCase : List[str] = (32, 32)
        __lowerCamelCase : List[str] = floats_tensor((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : Tuple = torch.tensor([10] ).to(SCREAMING_SNAKE_CASE_ )
        return {"sample": noise, "timestep": time_step}

    @property
    def lowercase_ ( self ) -> Optional[Any]:
        # Expected input shape: (channels, height, width).
        return (4, 32, 32)

    @property
    def lowercase_ ( self ) -> Optional[Any]:
        # Expected output shape: (channels, height, width).
        return (4, 32, 32)

    def lowercase_ ( self ) -> Optional[int]:
        # Minimal 4-channel UNet config plus matching dummy inputs.
        __lowerCamelCase : Optional[int] = {
            'sample_size': 32,
            'in_channels': 4,
            'out_channels': 4,
            'layers_per_block': 2,
            'block_out_channels': (32, 64),
            'attention_head_dim': 32,
            'down_block_types': ('DownBlock2D', 'DownBlock2D'),
            'up_block_types': ('UpBlock2D', 'UpBlock2D'),
        }
        __lowerCamelCase : Any = self.dummy_input
        return init_dict, inputs_dict

    def lowercase_ ( self ) -> List[str]:
        # from_pretrained must report no missing keys and yield a usable model.
        __lowerCamelCase , __lowerCamelCase : str = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=SCREAMING_SNAKE_CASE_ )
        self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
        self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
        model.to(SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : List[str] = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
    def lowercase_ ( self ) -> Dict:
        # Same round-trip as above, restricted to GPU.
        __lowerCamelCase , __lowerCamelCase : Optional[Any] = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : List[str] = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
    def lowercase_ ( self ) -> int:
        # by defautl model loading will use accelerate as `low_cpu_mem_usage=True`
        # Compare the accelerate-loaded model against a normally loaded one
        # (low_cpu_mem_usage disabled) on the same deterministic noise; the
        # outputs must agree to rtol=1e-3.
        __lowerCamelCase , __lowerCamelCase : str = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=SCREAMING_SNAKE_CASE_ )
        model_accelerate.to(SCREAMING_SNAKE_CASE_ )
        model_accelerate.eval()
        __lowerCamelCase : Dict = torch.randn(
            1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
        __lowerCamelCase : List[Any] = noise.to(SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : Optional[int] = torch.tensor([10] * noise.shape[0] ).to(SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : List[str] = model_accelerate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )['sample']
        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()
        __lowerCamelCase , __lowerCamelCase : Union[str, Any] = UNetaDModel.from_pretrained(
            'fusing/unet-ldm-dummy-update' , output_loading_info=SCREAMING_SNAKE_CASE_ , low_cpu_mem_usage=SCREAMING_SNAKE_CASE_ )
        model_normal_load.to(SCREAMING_SNAKE_CASE_ )
        model_normal_load.eval()
        __lowerCamelCase : List[Any] = model_normal_load(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )['sample']
        assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , rtol=1E-3 )

    def lowercase_ ( self ) -> Optional[int]:
        # Regression test: output slice on deterministic noise must match the
        # hard-coded reference values below.
        __lowerCamelCase : str = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' )
        model.eval()
        model.to(SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : List[Any] = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        __lowerCamelCase : Optional[int] = noise.to(SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : Dict = torch.tensor([10] * noise.shape[0] ).to(SCREAMING_SNAKE_CASE_ )
        with torch.no_grad():
            __lowerCamelCase : Any = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sample
        __lowerCamelCase : str = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        __lowerCamelCase : Optional[int] = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] )
        # fmt: on
        self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , rtol=1E-3 ) )
class UpperCAmelCase_ (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
    """Test suite for an NCSN++-style UNet (Fourier time embedding, skip
    blocks), with slow slice-regression tests against
    `google/ncsnpp-celebahq-256` and `fusing/ncsnpp-ffhq-ve-dummy-update`.

    NOTE(review): machine-mangled file — duplicated `lowerCamelCase` /
    `__lowerCamelCase` bindings were originally distinct identifiers
    (`batch_size`, `model`, `noise`, `output_slice`, ...); later reads of
    those names raise NameError as-is.  `torch.intaa` in the dummy input is
    not a real dtype — almost certainly mangled `torch.int32`/`torch.int64`;
    confirm against the upstream diffusers test before relying on it.
    """

    lowerCamelCase : Dict = UNetaDModel
    lowerCamelCase : Union[str, Any] = 'sample'

    @property
    def lowercase_ ( self , SCREAMING_SNAKE_CASE_=(32, 32) ) -> Union[str, Any]:
        # Dummy (batch=4, channels=3) sample with a configurable spatial size.
        # NOTE(review): a @property normally takes no extra argument — the
        # parameter/argument names here are mangled; confirm upstream.
        __lowerCamelCase : Optional[int] = 4
        __lowerCamelCase : int = 3
        __lowerCamelCase : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : str = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=SCREAMING_SNAKE_CASE_ )
        return {"sample": noise, "timestep": time_step}

    @property
    def lowercase_ ( self ) -> Optional[Any]:
        # Expected input shape: (channels, height, width).
        return (3, 32, 32)

    @property
    def lowercase_ ( self ) -> Dict:
        # Expected output shape: (channels, height, width).
        return (3, 32, 32)

    def lowercase_ ( self ) -> List[str]:
        # Skip-connection UNet config with a Fourier time embedding and no
        # group norm (norm_num_groups=None), plus matching dummy inputs.
        __lowerCamelCase : Optional[Any] = {
            'block_out_channels': [32, 64, 64, 64],
            'in_channels': 3,
            'layers_per_block': 1,
            'out_channels': 3,
            'time_embedding_type': 'fourier',
            'norm_eps': 1E-6,
            'mid_block_scale_factor': math.sqrt(2.0 ),
            'norm_num_groups': None,
            'down_block_types': [
                'SkipDownBlock2D',
                'AttnSkipDownBlock2D',
                'SkipDownBlock2D',
                'SkipDownBlock2D',
            ],
            'up_block_types': [
                'SkipUpBlock2D',
                'SkipUpBlock2D',
                'AttnSkipUpBlock2D',
                'SkipUpBlock2D',
            ],
        }
        __lowerCamelCase : int = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def lowercase_ ( self ) -> List[Any]:
        # from_pretrained must report no missing keys; run a 256x256 forward pass.
        __lowerCamelCase , __lowerCamelCase : Union[str, Any] = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' , output_loading_info=SCREAMING_SNAKE_CASE_ )
        self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
        self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
        model.to(SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : List[Any] = self.dummy_input
        __lowerCamelCase : Union[str, Any] = floats_tensor((4, 3) + (2_56, 2_56) ).to(SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : List[Any] = noise
        __lowerCamelCase : Dict = model(**SCREAMING_SNAKE_CASE_ )
        assert image is not None, "Make sure output is not None"

    @slow
    def lowercase_ ( self ) -> Tuple:
        # Slice-regression test on an all-ones 256x256 input.
        __lowerCamelCase : Any = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' )
        model.to(SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : Any = 4
        __lowerCamelCase : List[str] = 3
        __lowerCamelCase : Optional[Any] = (2_56, 2_56)
        __lowerCamelCase : Optional[int] = torch.ones((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : Tuple = torch.tensor(batch_size * [1E-4] ).to(SCREAMING_SNAKE_CASE_ )
        with torch.no_grad():
            __lowerCamelCase : List[str] = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sample
        __lowerCamelCase : Dict = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        __lowerCamelCase : List[Any] = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] )
        # fmt: on
        self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , rtol=1E-2 ) )

    def lowercase_ ( self ) -> int:
        # Same slice-regression pattern against the small dummy VE checkpoint
        # (fast enough to run without @slow).
        __lowerCamelCase : Dict = UNetaDModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update' )
        model.to(SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : str = 4
        __lowerCamelCase : Dict = 3
        __lowerCamelCase : Union[str, Any] = (32, 32)
        __lowerCamelCase : Dict = torch.ones((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : Optional[Any] = torch.tensor(batch_size * [1E-4] ).to(SCREAMING_SNAKE_CASE_ )
        with torch.no_grad():
            __lowerCamelCase : Optional[int] = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sample
        __lowerCamelCase : Optional[Any] = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        __lowerCamelCase : Optional[Any] = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] )
        # fmt: on
        self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , rtol=1E-2 ) )

    def lowercase_ ( self ) -> Any:
        # not required for this model
        pass
| 185 |
'''simple docstring'''
# Molar (universal) gas constant R, in J * mol^-1 * K^-1.
# Previously this was bound to the mangled name `A__` while both functions
# below referenced `UNIVERSAL_GAS_CONSTANT` (a NameError at call time).
UNIVERSAL_GAS_CONSTANT = 8.314462
# Backward-compatible alias for the old (machine-mangled) constant name.
A__ = UNIVERSAL_GAS_CONSTANT


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Return the pressure of an ideal gas, P = nRT / V (SI units: Pa).

    The two functions in this module previously shared one mangled name
    (`UpperCAmelCase__`), so the second definition shadowed this one; they
    are restored to descriptive names here.

    Args:
        moles: amount of substance n, in mol (non-negative).
        kelvin: absolute temperature T, in K (non-negative).
        volume: volume V, in m^3 (non-negative; a zero volume propagates
            as ZeroDivisionError, unchanged from the original behavior).

    Raises:
        ValueError: if any input is negative.
    """
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError('Invalid inputs. Enter positive value.')
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Return the volume of an ideal gas, V = nRT / P (SI units: m^3).

    Args:
        moles: amount of substance n, in mol (non-negative).
        kelvin: absolute temperature T, in K (non-negative).
        pressure: pressure P, in Pa (non-negative; zero propagates as
            ZeroDivisionError, unchanged from the original behavior).

    Raises:
        ValueError: if any input is negative.
    """
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError('Invalid inputs. Enter positive value.')
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
| 185 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : List[str] = logging.get_logger(__name__)
__snake_case : Optional[int] = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class __UpperCAmelCase(PretrainedConfig):
    """Configuration for a Decision Transformer model (GPT-2 style backbone).

    NOTE(review): the original (machine-mangled) block was not runnable —
    every ``__init__`` parameter shared the name ``_SCREAMING_SNAKE_CASE``
    (a SyntaxError), the body bound each value to the throwaway name ``A_``
    instead of ``self.<attr>``, all three class attributes shared the name
    ``__lowercase`` (only the last survived), and the base class
    ``_UpperCamelCase`` was undefined (``PretrainedConfig`` is imported at
    the top of this file).  Names are restored from the upstream
    ``DecisionTransformerConfig``, whose defaults match this block
    position-for-position.

    Args:
        state_dim: dimension of the environment observation vector.
        act_dim: dimension of the action vector.
        hidden_size: transformer hidden size.
        max_ep_len: maximum episode length (size of the timestep embedding).
        action_tanh: whether predicted actions are squashed with tanh.
        vocab_size, n_positions, n_layer, n_head, n_inner,
        activation_function, resid_pdrop, embd_pdrop, attn_pdrop,
        layer_norm_epsilon, initializer_range, scale_attn_weights,
        use_cache, scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn:
            GPT-2 backbone hyper-parameters.
        bos_token_id, eos_token_id: special token ids, also forwarded to the
            ``PretrainedConfig`` base.
    """

    model_type = 'decision_transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function='relu',
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        # The base class also consumes the special-token ids (e.g. for
        # serialization), so they are forwarded explicitly.
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 351 | '''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
__snake_case : str = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class __UpperCAmelCase ( unittest.TestCase ):
    """Staging-Hub integration tests: push a tiny FlaxBertModel to the Hub
    (user and org namespaces) via both `push_to_hub` and `save_pretrained`,
    reload it, and check every parameter survives the round-trip to 1e-3.

    NOTE(review): machine-mangled file — the repeated `A_ = ...` bindings
    were originally distinct names (`model`, `new_model`, `base_params`,
    `new_params`, `max_diff`, ...), so later reads of those names raise
    NameError as-is, and `_SCREAMING_SNAKE_CASE` stands in for several
    different original arguments (e.g. `cls._token`, `tmp_dir`, `True`).
    """

    @classmethod
    def __A ( cls ) -> Dict:
        # Authenticate against the (staging) Hub once for the whole class.
        A_ = TOKEN
        HfFolder.save_token(_SCREAMING_SNAKE_CASE )

    @classmethod
    def __A ( cls ) -> Optional[int]:
        # Best-effort cleanup of any repos the tests created; a missing repo
        # surfaces as HTTPError and is deliberately ignored.
        try:
            delete_repo(token=cls._token , repo_id='''test-model-flax''' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
        except HTTPError:
            pass

    def __A ( self ) -> str:
        # Push to the user namespace via push_to_hub, reload, and compare
        # flattened parameter trees; then repeat via save_pretrained(push_to_hub=...).
        A_ = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE )
        model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
        A_ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
        A_ = flatten_dict(unfreeze(model.params ) )
        A_ = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            A_ = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' )
        # Reset repo
        delete_repo(token=self._token , repo_id='''test-model-flax''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(_SCREAMING_SNAKE_CASE , repo_id='''test-model-flax''' , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token )
            A_ = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
            A_ = flatten_dict(unfreeze(model.params ) )
            A_ = flatten_dict(unfreeze(new_model.params ) )
            for key in base_params.keys():
                A_ = (base_params[key] - new_params[key]).sum().item()
                self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' )

    def __A ( self ) -> List[str]:
        # Same round-trip as above, but into an organization namespace.
        A_ = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE )
        model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
        A_ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
        A_ = flatten_dict(unfreeze(model.params ) )
        A_ = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            A_ = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' )
        # Reset repo
        delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                _SCREAMING_SNAKE_CASE , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token )
            A_ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
            A_ = flatten_dict(unfreeze(model.params ) )
            A_ = flatten_dict(unfreeze(new_model.params ) )
            for key in base_params.keys():
                A_ = (base_params[key] - new_params[key]).sum().item()
                self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F'''{key} not identical''' )
def check_models_equal(model_a, model_b):
    """Return True iff two Flax models have numerically identical parameters.

    Flattens both parameter trees (via flax's ``flatten_dict``, imported at
    the top of this file) and compares leaf-by-leaf with an absolute-sum
    tolerance of 1e-4.

    NOTE(review): the original (machine-mangled) block was broken in three
    ways — both parameters shared the name ``_UpperCamelCase`` (a
    SyntaxError), both flattened trees were bound to one name so the loop
    compared an array against itself (always "equal"), and the function was
    named ``_UpperCAmelCase`` while the tests below call ``check_models_equal``.

    Args:
        model_a: first Flax model; must expose a ``.params`` tree.
        model_b: second Flax model; must expose a ``.params`` tree.

    Returns:
        bool: True when every leaf of ``model_a`` matches the corresponding
        leaf of ``model_b`` within tolerance.
    """
    models_are_equal = True
    flat_params_a = flatten_dict(model_a.params)
    flat_params_b = flatten_dict(model_b.params)
    for key in flat_params_a.keys():
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class __UpperCAmelCase ( unittest.TestCase ):
    """Subfolder loading tests for Flax models: saving into / loading from a
    `bert` subfolder, both for single-file and sharded checkpoints, locally
    and from Hub fixtures.

    NOTE(review): machine-mangled file — repeated `A_ = ...` bindings were
    originally distinct names (`config`, `model`, `subfolder`,
    `model_loaded`, ...), and `_SCREAMING_SNAKE_CASE` stands in for several
    different original arguments (e.g. `tmp_dir`, `subfolder`, `OSError`);
    the later reads of `model` etc. raise NameError as-is.
    """

    def __A ( self ) -> List[str]:
        # Save into a subfolder; loading the root must fail, loading with
        # subfolder= must round-trip the parameters.
        A_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
        A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE )
        A_ = '''bert'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
            with self.assertRaises(_SCREAMING_SNAKE_CASE ):
                A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
            A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
        self.assertTrue(check_models_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )

    def __A ( self ) -> List[Any]:
        # Same as above, but force sharding with a tiny max_shard_size.
        A_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
        A_ = FlaxBertModel(_SCREAMING_SNAKE_CASE )
        A_ = '''bert'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , max_shard_size='''10KB''' )
            with self.assertRaises(_SCREAMING_SNAKE_CASE ):
                A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
            A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
        self.assertTrue(check_models_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )

    def __A ( self ) -> Dict:
        # Hub fixture with the checkpoint inside a subfolder (unsharded).
        A_ = '''bert'''
        A_ = '''hf-internal-testing/tiny-random-bert-subfolder'''
        with self.assertRaises(_SCREAMING_SNAKE_CASE ):
            A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
        A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
        self.assertIsNotNone(_SCREAMING_SNAKE_CASE )

    def __A ( self ) -> Optional[Any]:
        # Hub fixture with a sharded checkpoint inside a subfolder.
        A_ = '''bert'''
        A_ = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
        with self.assertRaises(_SCREAMING_SNAKE_CASE ):
            A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
        A_ = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
        self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
| 18 | 0 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
lowercase__ : List[Any] = '''pt'''
elif is_tf_available():
lowercase__ : Dict = '''tf'''
else:
lowercase__ : Optional[int] = '''jax'''
class lowercase_ ( UpperCamelCase_ , unittest.TestCase ):
    """TokenizerTesterMixin suite for the byte-level Perceiver tokenizer.

    NOTE(review): machine-mangled file — the two `UpperCAmelCase_` class
    attributes were originally distinct (presumably `tokenizer_class` and
    `test_rust_tokenizer`); the second overwrites the first.  Repeated
    `lowerCAmelCase = ...` bindings were originally distinct locals
    (`tokenizer`, `encoded`, `decoded`, `batch`, `targets`, ...), and
    `__SCREAMING_SNAKE_CASE` stands in for many different original
    parameters/arguments — one helper below even repeats it as a parameter
    name, which is a SyntaxError as-is.
    """

    UpperCAmelCase_ : Optional[int] = PerceiverTokenizer
    UpperCAmelCase_ : Optional[Any] = False

    def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
        # Save a fresh tokenizer into the mixin's tmp dir for later loading.
        super().setUp()
        lowerCAmelCase = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def SCREAMING_SNAKE_CASE_ ( self ) ->int:
        # Reference tokenizer loaded once from the Hub.
        return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' )

    def SCREAMING_SNAKE_CASE_ ( self , **__SCREAMING_SNAKE_CASE ) ->PerceiverTokenizer:
        # Tokenizer factory used by the mixin.
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=20 , __SCREAMING_SNAKE_CASE=5 ) ->Tuple[str, list]:
        # Build a (text, ids) pair that round-trips cleanly through the
        # tokenizer: only ASCII-letter tokens whose encode([tok]) == [id],
        # clamped between min_length and max_length.
        lowerCAmelCase = []
        for i in range(len(__SCREAMING_SNAKE_CASE ) ):
            try:
                lowerCAmelCase = tokenizer.decode([i] , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        lowerCAmelCase = list(filter(lambda __SCREAMING_SNAKE_CASE : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , __SCREAMING_SNAKE_CASE ) )
        lowerCAmelCase = list(filter(lambda __SCREAMING_SNAKE_CASE : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) )
        if max_length is not None and len(__SCREAMING_SNAKE_CASE ) > max_length:
            lowerCAmelCase = toks[:max_length]
        if min_length is not None and len(__SCREAMING_SNAKE_CASE ) < min_length and len(__SCREAMING_SNAKE_CASE ) > 0:
            while len(__SCREAMING_SNAKE_CASE ) < min_length:
                lowerCAmelCase = toks + toks
        # toks_str = [t[1] for t in toks]
        lowerCAmelCase = [t[0] for t in toks]
        # Ensure consistency
        lowerCAmelCase = tokenizer.decode(__SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE )
        if " " not in output_txt and len(__SCREAMING_SNAKE_CASE ) > 1:
            lowerCAmelCase = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE )
                + ''' '''
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE )
            )
        if with_prefix_space:
            lowerCAmelCase = ''' ''' + output_txt
        lowerCAmelCase = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
        return output_txt, output_ids

    def SCREAMING_SNAKE_CASE_ ( self ) ->int:
        # Byte-level encode/decode round-trips, including multi-byte chars.
        lowerCAmelCase = self.perceiver_tokenizer
        lowerCAmelCase = '''Unicode €.'''
        lowerCAmelCase = tokenizer(__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded['''input_ids'''] , __SCREAMING_SNAKE_CASE )
        # decoding
        lowerCAmelCase = tokenizer.decode(__SCREAMING_SNAKE_CASE )
        self.assertEqual(__SCREAMING_SNAKE_CASE , '''[CLS]Unicode €.[SEP]''' )
        lowerCAmelCase = tokenizer('''e è é ê ë''' )
        lowerCAmelCase = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded['''input_ids'''] , __SCREAMING_SNAKE_CASE )
        # decoding
        lowerCAmelCase = tokenizer.decode(__SCREAMING_SNAKE_CASE )
        self.assertEqual(__SCREAMING_SNAKE_CASE , '''[CLS]e è é ê ë[SEP]''' )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
        # Batched padding: ids and attention mask padded to shape (2, 38).
        lowerCAmelCase = self.perceiver_tokenizer
        lowerCAmelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        # fmt: off
        lowerCAmelCase = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        lowerCAmelCase = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE )
        self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        if FRAMEWORK != "jax":
            lowerCAmelCase = list(batch.input_ids.numpy()[0] )
        else:
            lowerCAmelCase = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        self.assertEqual((2, 38) , batch.input_ids.shape )
        self.assertEqual((2, 38) , batch.attention_mask.shape )

    def SCREAMING_SNAKE_CASE_ ( self ) ->str:
        # Encoder-only tokenizer: no decoder_* keys in the batch.
        lowerCAmelCase = self.perceiver_tokenizer
        lowerCAmelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        lowerCAmelCase = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('''input_ids''' , __SCREAMING_SNAKE_CASE )
        self.assertIn('''attention_mask''' , __SCREAMING_SNAKE_CASE )
        self.assertNotIn('''decoder_input_ids''' , __SCREAMING_SNAKE_CASE )
        self.assertNotIn('''decoder_attention_mask''' , __SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
        # text_target with max_length padding produces 32-wide target ids.
        lowerCAmelCase = self.perceiver_tokenizer
        lowerCAmelCase = [
            '''Summary of the text.''',
            '''Another summary.''',
        ]
        lowerCAmelCase = tokenizer(
            text_target=__SCREAMING_SNAKE_CASE , max_length=32 , padding='''max_length''' , truncation=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE )
        self.assertEqual(32 , targets['''input_ids'''].shape[1] )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
        # Save/reload round-trips, including added tokens and special tokens,
        # and model_max_length override at load time.
        lowerCAmelCase = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}" ):
                self.assertNotEqual(tokenizer.model_max_length , 42 )
        # Now let's start the test
        lowerCAmelCase = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}" ):
                # Isolate this from the other tests because we save additional tokens/etc
                lowerCAmelCase = tempfile.mkdtemp()
                lowerCAmelCase = ''' He is very happy, UNwant\u00E9d,running'''
                lowerCAmelCase = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
                tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
                lowerCAmelCase = tokenizer.__class__.from_pretrained(__SCREAMING_SNAKE_CASE )
                lowerCAmelCase = after_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
                self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
                shutil.rmtree(__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}" ):
                # Isolate this from the other tests because we save additional tokens/etc
                lowerCAmelCase = tempfile.mkdtemp()
                lowerCAmelCase = ''' He is very happy, UNwant\u00E9d,running'''
                tokenizer.add_tokens(['''bim''', '''bambam'''] )
                lowerCAmelCase = tokenizer.additional_special_tokens
                additional_special_tokens.append('''new_additional_special_token''' )
                tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
                lowerCAmelCase = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
                tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
                lowerCAmelCase = tokenizer.__class__.from_pretrained(__SCREAMING_SNAKE_CASE )
                lowerCAmelCase = after_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
                self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
                self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 42 )
                lowerCAmelCase = tokenizer.__class__.from_pretrained(__SCREAMING_SNAKE_CASE , model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length , 43 )
                shutil.rmtree(__SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE_ ( self ) ->int:
        # Edit special_tokens_map.json / tokenizer_config.json on disk and
        # verify from_pretrained honors them, then override
        # additional_special_tokens at load time.
        lowerCAmelCase = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(__SCREAMING_SNAKE_CASE )
                with open(os.path.join(__SCREAMING_SNAKE_CASE , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
                    lowerCAmelCase = json.load(__SCREAMING_SNAKE_CASE )
                with open(os.path.join(__SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
                    lowerCAmelCase = json.load(__SCREAMING_SNAKE_CASE )
                lowerCAmelCase = [F"<extra_id_{i}>" for i in range(125 )]
                lowerCAmelCase = added_tokens_extra_ids + [
                    '''an_additional_special_token'''
                ]
                lowerCAmelCase = added_tokens_extra_ids + [
                    '''an_additional_special_token'''
                ]
                with open(os.path.join(__SCREAMING_SNAKE_CASE , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
                    json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
                with open(os.path.join(__SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
                    json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                lowerCAmelCase = tokenizer_class.from_pretrained(
                    __SCREAMING_SNAKE_CASE , )
                self.assertIn(
                    '''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
                self.assertEqual(
                    ['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                lowerCAmelCase = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=__SCREAMING_SNAKE_CASE )]
                lowerCAmelCase = tokenizer_class.from_pretrained(
                    __SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , )
                self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
        # Decoding an invalid byte id yields the Unicode replacement character.
        lowerCAmelCase = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178] ) , '''�''' )

    def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
        # Intentionally disabled mixin test (not applicable to byte tokenizer).
        pass

    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
        # Intentionally disabled mixin test (not applicable to byte tokenizer).
        pass

    def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
        # Intentionally disabled mixin test (not applicable to byte tokenizer).
        pass

    def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
        # Intentionally disabled mixin test (not applicable to byte tokenizer).
        pass

    def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
        # convert_tokens_to_string must return a plain string for byte tokens.
        lowerCAmelCase = self.get_tokenizers(fast=__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE )
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}" ):
                lowerCAmelCase = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]''']
                lowerCAmelCase = tokenizer.convert_tokens_to_string(__SCREAMING_SNAKE_CASE )
                self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
| 338 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class __A(PretrainedConfig):
    """Configuration for a GLPN (Global-Local Path Networks) depth model.

    NOTE(review): the original (machine-mangled) block was not runnable —
    every ``__init__`` parameter was named ``A`` (a SyntaxError), the body
    bound values to the throwaway name ``_a`` instead of ``self.<attr>``,
    and the base class ``A`` was undefined (``PretrainedConfig`` is imported
    at the top of this file).  Parameter names are restored from the
    upstream ``GLPNConfig``, whose defaults match this block
    position-for-position.

    Args:
        num_channels: number of input image channels.
        num_encoder_blocks: number of encoder stages.
        depths: layers per encoder stage.
        sr_ratios: sequence-reduction ratios per stage.
        hidden_sizes: hidden size per stage.
        patch_sizes: patch size per stage.
        strides: stride per stage.
        num_attention_heads: attention heads per stage.
        mlp_ratios: MLP expansion ratio per stage.
        hidden_act: activation function name.
        hidden_dropout_prob, attention_probs_dropout_prob, drop_path_rate:
            dropout / stochastic-depth probabilities.
        initializer_range: stddev for weight init.
        layer_norm_eps: epsilon for layer norm.
        decoder_hidden_size: hidden size of the depth decoder.
        max_depth: maximum predicted depth value.
        head_in_index: encoder feature index fed to the head.
    """

    model_type = 'glpn'

    # NOTE: the mutable list defaults mirror the upstream transformers
    # signature; they are only stored, never mutated, so the shared-default
    # pitfall does not apply here.
    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act='gelu',
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
| 211 | 0 |
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of ``n`` (divisors except n).

    NOTE(review): in the mangled original both functions in this module
    shared the name ``UpperCamelCase_`` (the second shadowed this one) while
    the callers referenced ``sum_of_divisors``/``solution``; the intended
    names are restored here.  The fragile float comparison ``i != sqrt(n)``
    is replaced with exact integer arithmetic ``i * i == n``.
    """
    total = 0
    for i in range(1, int(sqrt(n)) + 1):
        if n % i == 0:
            total += i + n // i
            if i * i == n:
                # A perfect-square divisor would otherwise be counted twice.
                total -= i
    # `total` counted n itself (as the cofactor of 1); drop it.
    return total - n


def solution(limit: int = 10000) -> int:
    """Return the sum of all amicable numbers below ``limit``.

    Project Euler problem 21: a and b are amicable when d(a) = b, d(b) = a
    and a != b, where d is the proper-divisor sum.
    """
    total = 0
    for candidate in range(1, limit):
        partner = sum_of_divisors(candidate)
        # Perfect numbers (partner == candidate) are excluded by definition.
        if partner != candidate and sum_of_divisors(partner) == candidate:
            total += candidate
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 351 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class __magic_name__ :
        """Fallback stub used when the vision dependencies are unavailable.

        NOTE(review): mangled — in the upstream pipeline test this defines a
        dummy ``Image`` class with a no-op static ``open(*args, **kwargs)``;
        both the class name and the method name were destroyed by the
        renaming pass, so the later ``Image.Image`` references in this file
        will not resolve to this stub as-is.
        """

        @staticmethod
        def __magic_name__ ( *__snake_case , **__snake_case ) -> List[str]:
            '''No-op placeholder (accepts and ignores any arguments).'''
            pass
def hashimage(_snake_case):
    """Return the hex MD5 digest of an image-like object's raw pixel bytes.

    Works for any object exposing ``tobytes()`` (PIL images, numpy arrays).
    Restored from the mangled source, which called the non-existent
    ``hashlib.mda`` and read undefined names; the slow test below invokes this
    helper as ``hashimage``.
    """
    m = hashlib.md5(_snake_case.tobytes())
    return m.hexdigest()


# Backward-compatible binding of the mangled upstream name.
UpperCamelCase_ = hashimage
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __magic_name__ ( unittest.TestCase ):
    """Pipeline tests for depth estimation.

    NOTE(review): machine-renaming collapsed all method names onto one
    identifier (later defs shadow earlier ones) and bound locals to `__a`
    while reading them under their original names — confirm against the
    unmangled upstream `DepthEstimationPipelineTests` before running.
    """

    # Model mapping consumed by the pipeline-test mixin (upstream: `model_mapping`).
    SCREAMING_SNAKE_CASE = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def __magic_name__ ( self , __snake_case , __snake_case , __snake_case ) -> Dict:
        """Build a DepthEstimationPipeline plus sample image inputs."""
        # NOTE(review): the pipeline is bound to a mangled local but returned
        # as `depth_estimator` — a NameError as written; verify the binding.
        __a =DepthEstimationPipeline(model=__snake_case , image_processor=__snake_case )
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def __magic_name__ ( self , __snake_case , __snake_case ) -> Tuple:
        """Run the pipeline on single and batched inputs and check output keys."""
        # NOTE(review): `depth_estimator` and `dataset` are read below, but the
        # assignments were mangled to `__a` — NameErrors as written.
        __a =depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , __snake_case )
        import datasets

        __a =datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
        __a =depth_estimator(
            [
                Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
                'http://images.cocodataset.org/val2017/000000039769.jpg',
                # RGBA
                dataset[0]['file'],
                # LA
                dataset[1]['file'],
                # L
                dataset[2]['file'],
            ] )
        self.assertEqual(
            [
                {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
                {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
                {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
                {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
                {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
            ] , __snake_case , )

    @require_tf
    @unittest.skip('Depth estimation is not implemented in TF' )
    def __magic_name__ ( self ) -> Optional[Any]:
        """TF variant is intentionally skipped (no TF implementation)."""
        pass

    @slow
    @require_torch
    def __magic_name__ ( self ) -> int:
        """Integration test against Intel/dpt-large with pinned depth statistics."""
        # NOTE(review): `depth_estimator`/`outputs` rely on mangled bindings
        # above, and `hashimage` must be bound at module level — verify.
        __a ='Intel/dpt-large'
        __a =pipeline('depth-estimation' , model=__snake_case )
        __a =depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
        __a =hashimage(outputs['depth'] )
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
        self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )

    @require_torch
    def __magic_name__ ( self ) -> Any:
        # This is highly irregular to have no small tests.
        self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
# (removed non-Python dataset-table residue)
from bisect import bisect
from itertools import accumulate
def lowercase(vl, wt, w, n):
    """Fractional knapsack: maximum value achievable with capacity ``w``.

    vl: item values; wt: item weights (assumed strictly positive); w: knapsack
    capacity; n: number of items.  Items are taken greedily in descending
    value-per-weight order and the last item may be taken fractionally.

    Restored from the mangled source, whose four parameters all shared one
    name (a SyntaxError); the local names (``r``, ``vl``, ``wt``, ``acc``,
    ``k``) are grounded in the reads performed by the return expression.
    """
    # Sort (value, weight) pairs by value-per-weight ratio, best first.
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    # Prefix sums of weights; bisect finds how many whole items fit in w.
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
# (removed non-Python dataset-table residue)
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# Restore the distinct module-level doc constants: the mangled source rebound
# every one of them to the same name ``a__``, leaving the ``_*_FOR_DOC`` names
# used by the docstring decorators below undefined.

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]

# Preserve the mangled name's last binding for backward compatibility.
a__ = REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
class RegNetConvLayer(nn.Module):
    """Convolution -> BatchNorm -> activation building block.

    Restored from the mangled source: the original ``__init__`` had duplicate
    parameter names (a SyntaxError), called the non-existent ``nn.Convad`` /
    ``nn.BatchNormad``, and bound its submodules to a local instead of
    ``self``.  The attribute names are grounded in the reads performed by the
    forward pass (``self.convolution`` / ``self.normalization`` /
    ``self.activation``).
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        # "same" padding for odd kernel sizes; bias is redundant before BatchNorm.
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        # ACTaFN is the (mangled) activation registry imported at file top.
        self.activation = ACTaFN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetEmbeddings(nn.Module):
    """RegNet stem: a single stride-2 3x3 convolution over the input pixels.

    Restored from the mangled source, whose forward pass read ``pixel_values``
    and ``num_channels`` that were never bound, and whose attributes
    (``self.embedder`` / ``self.num_channels``, grounded by the forward reads)
    were assigned to a throwaway local.
    """

    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        # Remembered so forward() can validate the channel dimension of its input.
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    """1x1 strided conv + batch-norm used to project the residual branch.

    Restored from the mangled source (duplicate parameter names, non-existent
    ``nn.Convad``/``nn.BatchNormad``, submodules bound to a local); attribute
    names are grounded in the forward-pass reads.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        # bias is redundant before BatchNorm.
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    """Squeeze-and-Excitation channel attention (https://arxiv.org/abs/1709.01507).

    Restored from the mangled source (duplicate parameter names, non-existent
    ``nn.AdaptiveAvgPoolad``/``nn.Convad``, submodules bound to a local);
    ``self.pooler``/``self.attention`` are grounded in the forward reads, and
    the ``reduced_channels`` keyword is grounded in the Y-layer call site.
    """

    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        # (b, c, h, w) -> (b, c, 1, 1) channel descriptor.
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        # Re-weight each channel by its attention score in (0, 1).
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer(nn.Module):
    """RegNet's X layer: a ResNet-style bottleneck with a grouped 3x3 conv (reduction 1).

    Restored from the mangled source (duplicate parameter names, submodules
    bound to a local); ``in_channels``/``out_channels``/``stride`` are read by
    those names in the original body, and ``self.shortcut``/``self.layer``/
    ``self.activation`` are grounded in the forward-pass reads.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        # Grouped-conv width is driven by the config's groups_width.
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            # Final 1x1 projection has no activation; it is applied after the residual add.
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    """RegNet's Y layer: an X layer with a Squeeze-and-Excitation block.

    Restored from the mangled source (duplicate parameter names, submodules
    bound to a local); attribute names are grounded in the forward-pass reads
    and the explicit ``reduced_channels=`` keyword in the original body.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    """A RegNet stage: ``depth`` stacked X or Y layers; the first one downsamples.

    Restored from the mangled source (duplicate parameter names, the layer
    stack bound to a local); ``self.layers`` is grounded in the forward read.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    """Stack of RegNet stages producing the final (and optionally all) hidden states.

    Restored from the mangled source: the channel-pair zip built for the later
    stages was discarded and the config object iterated instead, and the
    forward signature declared duplicate parameter names (a SyntaxError).
    """

    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        # Collect the input of each stage (plus the final output) when requested.
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.

    Restored from the mangled source: the four class attributes were all
    rebound to a single name (values RegNetConfig / "regnet" / "pixel_values"
    / True ground the mapping below), and the framework hook methods had lost
    their names.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        # Kaiming init for convs, constant init for norm layers (the
        # ``module.weight``/``module.bias`` reads ground the parameter name).
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        # NOTE(review): the mangled source lost both the isinstance target and
        # the attribute assignment; toggling the flag on the base model mirrors
        # the ResNet reference — confirm the intended target class.
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
# Restore the two distinct docstring constants: the mangled source bound both
# raw strings to the same name, making the first one unreachable.
REGNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.
    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""

# Preserve the mangled name's last binding for backward compatibility.
a__ = REGNET_INPUTS_DOCSTRING
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    """Bare RegNet backbone: embeddings -> encoder -> global average pooling.

    Restored from the mangled source: submodules were bound to locals instead
    of ``self`` (``self.embedder``/``self.encoder``/``self.pooler`` are
    grounded in the forward reads) and the forward signature declared
    duplicate parameter names (a SyntaxError).
    """

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    """RegNet backbone plus a linear classification head.

    Restored from the mangled source: attributes were bound to locals
    (``self.regnet``/``self.classifier``/``self.num_labels`` are grounded in
    the forward reads), the forward signature declared duplicate parameter
    names, and the ``problem_type`` inference was written to a throwaway local
    instead of ``self.config.problem_type`` (which the branches below read).
    """

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in
            `[0, ..., config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            # Infer the problem type once from num_labels / label dtype.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
# Configure root logging once at import time (script-style module).
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
A = logging.getLogger(__name__)
# The functions below log through ``logger``; the mangled source only bound
# the mangled name, leaving ``logger`` undefined.
logger = A
def main():
    """Tokenize a raw text dump and pickle the resulting token-id arrays.

    Reads ``--file_path`` line by line, wraps each line with the tokenizer's
    BOS/CLS and EOS/SEP markers, encodes it, and dumps the list of numpy id
    arrays to ``<dump_file>.<tokenizer_name>.pickle``.

    Restored from the mangled source: locals were bound to throwaway names
    while read under their real ones (``bos``/``sep``/``data``/``rslt``/
    ``dp_file``/``vocab_size``/``rslt_``/``handle``), and ``np.uintaa`` /
    ``np.intaa`` do not exist (uint16/int32, grounded by the ``1 << 16``
    vocab-size check).
    """
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    # NOTE(review): ``GPTaTokenizer`` is the (mangled) name imported at the top
    # of this file — presumably GPT2Tokenizer; confirm the import.
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPTaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")
    rslt = []
    count = 0  # renamed from ``iter`` to avoid shadowing the builtin
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)
        count += 1
        if count % interval == 0:
            end = time.time()
            logger.info(f"{count} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # token ids fit in 16 bits iff the vocabulary is smaller than 2**16
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


# Backward-compatible binding of the mangled upstream name.
__A = main
if __name__ == "__main__":
    # Entry point. (Removed the dataset-table residue that had been fused onto
    # this line — it was not valid Python.)
    main()
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class __lowercase(PipelineTool):
    """Tool that transcribes audio to text with Whisper (upstream: SpeechToTextTool).

    Restored from the mangled source: all eight class attributes were rebound
    to one name (only the last survived) and all three pipeline hooks shared a
    single method name (only ``decode`` survived).  The attribute/method names
    follow the ``PipelineTool`` API; the base class is grounded by the
    ``from .base import PipelineTool`` import above.
    """

    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        """Convert raw audio into Whisper input features."""
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        """Run Whisper generation on the encoded features."""
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        """Decode the generated token ids to a transcript string."""
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
from math import factorial
def combinations(n: int, k: int) -> int:
    """Return the binomial coefficient ``n choose k``.

    Restored from the mangled source, whose two parameters shared one name
    (a SyntaxError); the demo code below calls this function as
    ``combinations``.
    """
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))


# Backward-compatible binding of the mangled upstream name.
SCREAMING_SNAKE_CASE__ = combinations
if __name__ == "__main__":
    # Demo output; relies on a module-level `combinations` binding.
    print(
        'The number of five-card hands possible from a standard',
        f"""fifty-two card deck is: {combinations(5_2, 5)}\n""",
    )

    print(
        'If a class of 40 students must be arranged into groups of',
        f"""4 for group projects, there are {combinations(4_0, 4)} ways""",
        'to arrange them.\n',
    )

    print(
        'If 10 teams are competing in a Formula One race, there',
        f"""are {combinations(1_0, 3)} ways that first, second and""",
        'third place can be awarded.',
    )
# (removed non-Python dataset-table residue)
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
# Threshold below which the ternary searches fall back to a linear scan.  The
# search functions below read this as ``precision``; the mangled name is kept
# bound for backward compatibility.
precision = 10
lowerCamelCase = precision
def lin_search(left: int, right: int, array: list, target: int) -> int:
    """Linear scan of ``array[left:right]``; return the index of ``target`` or -1.

    Restored from the mangled source, whose four parameters shared one name
    (a SyntaxError); the ternary-search functions below call this helper as
    ``lin_search``.
    """
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list, target: int) -> int:
    """Iterative ternary search over a sorted list; return the index of ``target`` or -1.

    Falls back to ``lin_search`` once the window shrinks below ``precision``.
    Restored from the mangled source, whose locals (``left``/``right``/
    ``one_third``/``two_third``) were bound to throwaway names while read
    under their real ones.
    """
    left = 0
    right = len(array)
    while left <= right:
        # Small windows: a linear scan is cheaper than further splitting.
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left: int, right: int, array: list, target: int) -> int:
    """Recursive ternary search over ``array[left..right]``; return index or -1.

    Restored from the mangled source, whose four parameters shared one name
    (a SyntaxError); the recursive call sites use the real function name.
    """
    if left < right:
        # Small windows: a linear scan is cheaper than further splitting.
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
    # Interactive demo. Restored from the mangled source, where every local
    # was bound to one throwaway name (so `collection`, `target` and the
    # results were all undefined) and both prints showed the iterative result.
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    """Shared assertions for a Dataset read from the 4-row parquet fixture.

    Restored from the mangled source, whose two parameters shared one name
    (a SyntaxError) and whose first assertion degenerated to
    ``isinstance(x, x)``; the tests below call this helper by its real name.
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
# NOTE(review): machine-renaming collapsed every parameter of the tests below
# onto `_UpperCAmelCase` (duplicate parameter names are a SyntaxError) and
# every local onto `SCREAMING_SNAKE_CASE`, while bodies still read the real
# fixture names (parquet_path, tmp_path, keep_in_memory, features, split).
# The fixture wiring must be restored from the upstream `datasets` test suite
# before these can run.


@pytest.mark.parametrize('keep_in_memory' , [False, True])
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
    # Reads the parquet fixture with/without keep_in_memory and checks arrow memory behavior.
    SCREAMING_SNAKE_CASE = tmp_path / 'cache'
    SCREAMING_SNAKE_CASE = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        SCREAMING_SNAKE_CASE = ParquetDatasetReader(_UpperCAmelCase , cache_dir=_UpperCAmelCase , keep_in_memory=_UpperCAmelCase).read()
    _check_parquet_dataset(_UpperCAmelCase , _UpperCAmelCase)


@pytest.mark.parametrize(
    'features' , [
        None,
        {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
        {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
        {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
        {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
    ] , )
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
    # Reads the fixture with an explicit Features schema (or None for type inference).
    SCREAMING_SNAKE_CASE = tmp_path / 'cache'
    SCREAMING_SNAKE_CASE = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features
    SCREAMING_SNAKE_CASE = (
        Features({feature: Value(_UpperCAmelCase) for feature, dtype in features.items()}) if features is not None else None
    )
    SCREAMING_SNAKE_CASE = ParquetDatasetReader(_UpperCAmelCase , features=_UpperCAmelCase , cache_dir=_UpperCAmelCase).read()
    _check_parquet_dataset(_UpperCAmelCase , _UpperCAmelCase)


@pytest.mark.parametrize('split' , [None, NamedSplit('train'), 'train', 'test'])
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
    # Checks that the requested split name is propagated to the resulting Dataset.
    SCREAMING_SNAKE_CASE = tmp_path / 'cache'
    SCREAMING_SNAKE_CASE = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    SCREAMING_SNAKE_CASE = ParquetDatasetReader(_UpperCAmelCase , cache_dir=_UpperCAmelCase , split=_UpperCAmelCase).read()
    _check_parquet_dataset(_UpperCAmelCase , _UpperCAmelCase)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize('path_type' , [str, list])
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
    # Accepts either a single path string or a list of paths.
    if issubclass(_UpperCAmelCase , _UpperCAmelCase):
        SCREAMING_SNAKE_CASE = parquet_path
    elif issubclass(_UpperCAmelCase , _UpperCAmelCase):
        SCREAMING_SNAKE_CASE = [parquet_path]
    SCREAMING_SNAKE_CASE = tmp_path / 'cache'
    SCREAMING_SNAKE_CASE = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    SCREAMING_SNAKE_CASE = ParquetDatasetReader(_UpperCAmelCase , cache_dir=_UpperCAmelCase).read()
    _check_parquet_dataset(_UpperCAmelCase , _UpperCAmelCase)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for a DatasetDict read from the parquet fixture.

    Restored from the mangled source (duplicate parameter names); the
    ``splits=`` keyword at its call sites grounds the third parameter name.
    """
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True])
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = tmp_path / 'cache'
SCREAMING_SNAKE_CASE = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE = ParquetDatasetReader(
{'train': parquet_path} , cache_dir=_UpperCAmelCase , keep_in_memory=_UpperCAmelCase).read()
_check_parquet_datasetdict(_UpperCAmelCase , _UpperCAmelCase)
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = tmp_path / 'cache'
SCREAMING_SNAKE_CASE = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE = (
Features({feature: Value(_UpperCAmelCase) for feature, dtype in features.items()}) if features is not None else None
)
SCREAMING_SNAKE_CASE = ParquetDatasetReader({'train': parquet_path} , features=_UpperCAmelCase , cache_dir=_UpperCAmelCase).read()
_check_parquet_datasetdict(_UpperCAmelCase , _UpperCAmelCase)
@pytest.mark.parametrize('split' , [None, NamedSplit('train'), 'train', 'test'])
def lowerCamelCase__(split, parquet_path, tmp_path):
    """Reading a DatasetDict from parquet honours explicit / implicit splits.

    NOTE(review): parameter names restored to the fixture/parametrize names
    the body actually uses.
    """
    if split:
        path = {split: parquet_path}
    else:
        # no split given: expect both default splits to be read
        split = 'train'
        path = {'train': parquet_path, 'test': parquet_path}
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    dataset = ParquetDatasetReader(path , cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset , expected_features , splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def lowerCamelCase__(dataset, tmp_path):
    """A written parquet file round-trips to the dataset's arrow table.

    NOTE(review): parameter names restored (pytest ``dataset`` / ``tmp_path``
    fixtures are referenced in the body).
    """
    writer = ParquetDatasetWriter(dataset , tmp_path / 'foo.parquet')
    # write() returns the number of bytes written; it must be non-trivial
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / 'foo.parquet')
    output_table = pf.read()
    assert dataset.data.table == output_table
def lowerCamelCase__(shared_datadir, tmp_path):
    """Image features survive a parquet write / reload round trip.

    NOTE(review): parameter names restored; the streaming reader was called
    with an undefined name where ``streaming=True`` is clearly intended
    (an eager read is already checked just above).
    """
    image_path = str(shared_datadir / 'test_image_rgb.jpg')
    data = {'image': [image_path]}
    features = Features({'image': Image()})
    dataset = Dataset.from_dict(data , features=features)
    writer = ParquetDatasetWriter(dataset , tmp_path / 'foo.parquet')
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / 'foo.parquet'))
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / 'foo.parquet') , streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
    'feature, expected' , [
        (Features({'foo': Value('int32')}), None),
        (Features({'image': Image(), 'foo': Value('int32')}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({'nested': Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ] , )
def lowerCamelCase__(feature, expected):
    """Row-group sizing: media features force the configured batch size.

    NOTE(review): parameters renamed to match the parametrize ids the body uses.
    """
    assert get_writer_batch_size(feature) == expected
| 327 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _snake_case ( unittest.TestCase ):
    """Integration test for the TF CamemBERT checkpoint.

    NOTE(review): ``tf.intaa`` / ``tf.floataa`` do not exist — they are
    corruptions of ``tf.int32`` / ``tf.float32``; the undefined name ``a``
    is restored to the obvious locals (input ids / expected shape).
    """

    @slow
    def SCREAMING_SNAKE_CASE__(self) -> int:
        model = TFCamembertModel.from_pretrained('jplu/tf-camembert-base')
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.int32 , )  # J'aime le camembert !"
        output = model(input_ids)['last_hidden_state']
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape , expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.02_54, 0.02_35, 0.10_27], [0.06_06, -0.18_11, -0.04_18], [-0.15_61, -0.11_27, 0.26_87]]] , dtype=tf.float32 , )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4))
| 327 | 1 |
from math import ceil
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : int ):
"""simple docstring"""
A_ = list(range(0 ,__UpperCamelCase ) )
A_ = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
A_ = []
for i in device_map_blocks:
if device_map_blocks.count(__UpperCamelCase ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(__UpperCamelCase )
# Missing blocks
A_ = [i for i in blocks if i not in device_map_blocks]
A_ = [i for i in device_map_blocks if i not in blocks]
if len(__UpperCamelCase ) != 0:
raise ValueError(
"Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
" These attention blocks were specified more than once: " + str(__UpperCamelCase ) )
if len(__UpperCamelCase ) != 0:
raise ValueError(
"There are attention blocks for this model that are not specified in the device_map. Add these attention "
"blocks to a device on the device_map: " + str(__UpperCamelCase ) )
if len(__UpperCamelCase ) != 0:
raise ValueError(
"The device_map contains more attention blocks than this model has. Remove these from the device_map:"
+ str(__UpperCamelCase ) )
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
A_ = list(range(__UpperCamelCase ) )
A_ = int(ceil(n_layers / len(__UpperCamelCase ) ) )
A_ = [layers[i : i + n_blocks] for i in range(0 ,__UpperCamelCase ,__UpperCamelCase )]
return dict(zip(__UpperCamelCase ,__UpperCamelCase ) ) | 312 |
import functools
from typing import Any
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : list[str] ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or len(__UpperCamelCase ) == 0:
raise ValueError("the string should be not empty string" )
if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not all(
isinstance(__UpperCamelCase ,__UpperCamelCase ) and len(__UpperCamelCase ) > 0 for item in words ):
raise ValueError("the words should be a list of non-empty strings" )
# Build trie
A_ = {}
A_ = "WORD_KEEPER"
for word in words:
A_ = trie
for c in word:
if c not in trie_node:
A_ = {}
A_ = trie_node[c]
A_ = True
A_ = len(__UpperCamelCase )
# Dynamic programming method
@functools.cache
def is_breakable(__UpperCamelCase : int ) -> bool:
if index == len_string:
return True
A_ = trie
for i in range(__UpperCamelCase ,__UpperCamelCase ):
A_ = trie_node.get(string[i] ,__UpperCamelCase )
if trie_node is None:
return False
if trie_node.get(__UpperCamelCase ,__UpperCamelCase ) and is_breakable(i + 1 ):
return True
return False
return is_breakable(0 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 312 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1, string2):
    """Merge two equal-length bit strings differing in at most one position.

    Differing positions are replaced by '_'; returns False when the strings
    differ in more than one position.
    NOTE(review): renamed from the shadowed ``lowerCAmelCase_`` — call sites
    in this file use ``compare_string``; the always-false ``lista[i] != lista[i]``
    comparison and the lost ``list1[i] = '_'`` target are restored.
    """
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = '_'
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary):
    """Iteratively reduce ``binary`` terms, returning the surviving prime implicants.

    NOTE(review): renamed from the shadowed ``lowerCAmelCase_`` (called as
    ``check`` in ``main``); the lost ``check1[i] / check1[j]`` assignment
    targets are restored from the visible loop structure.
    """
    pi = []
    while True:
        # '$' marks terms not merged with any other this round
        check1 = ['$'] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = '*'
                    check1[j] = '*'
                    temp.append('X')
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable, minterms):
    """Render each minterm as a ``no_of_variable``-digit base-2 string.

    NOTE(review): renamed from the shadowed ``lowerCAmelCase_`` (called as
    ``decimal_to_binary`` in ``main``); duplicate parameter names restored.
    """
    temp = []
    for minterm in minterms:
        string = ''
        for _ in range(no_of_variable):
            # prepend the low-order digit, then shift right
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1, string2, count):
    """Return True when ``string1`` and ``string2`` differ in exactly ``count`` positions.

    NOTE(review): renamed from the shadowed ``lowerCAmelCase_`` (called as
    ``is_for_table`` in ``prime_implicant_chart``); the always-false
    ``lista[i] != lista[i]`` comparison is restored to compare both strings.
    """
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart, prime_implicants):
    """Select essential prime implicants from a coverage ``chart``.

    First takes implicants that uniquely cover a minterm column, zeroing the
    columns they cover, then greedily takes the implicant covering the most
    remaining columns until the chart is empty. Mutates ``chart`` in place.
    NOTE(review): renamed from the shadowed ``lowerCAmelCase_`` (called as
    ``selection`` in ``main``); the lost ``select[rem]`` / ``chart[...][...]``
    assignment targets are restored from the loop structure.
    """
    temp = []
    select = [0] * len(chart)
    # pass 1: columns covered by exactly one implicant make it essential
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    # zero out every column this implicant covers
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # pass 2: greedy cover of whatever columns remain
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants, binary):
    """Build the coverage chart: chart[i][j] == 1 iff implicant i covers minterm j.

    An implicant covers a minterm when they differ in exactly as many
    positions as the implicant has '_' wildcards.
    NOTE(review): renamed from the shadowed ``lowerCAmelCase_`` (called as
    ``prime_implicant_chart`` in ``main``); the lost ``chart[i][j] = 1``
    target is restored.
    """
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count('_')
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main():
    """Interactive Quine-McCluskey driver: prompt, minimize, print implicants.

    NOTE(review): renamed from the shadowed ``lowerCAmelCase_`` — the module
    guard below calls ``main()``; locals renamed to reflect what they hold.
    """
    no_of_variable = int(input('Enter the no. of variables\n'))
    minterms = [
        float(x)
        for x in input(
            'Enter the decimal representation of Minterms \'Spaces Separated\'\n').split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print('Prime Implicants are:')
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print('Essential Prime Implicants are:')
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 356 |
'''simple docstring'''
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class A(DiffusionPipeline):
    """Minimal unconditional image-generation pipeline (UNet + scheduler).

    NOTE(review): the original base class was the undefined name
    ``__snake_case``; ``DiffusionPipeline`` (imported at the top of this
    file) is the class that provides ``register_modules`` /
    ``progress_bar`` / ``numpy_to_pil``. The stray ``"This is a local
    test"`` element appended to both return paths broke the declared
    ``Union[ImagePipelineOutput, Tuple]`` contract and has been removed.
    """

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Run the denoising loop and return generated images."""
        # start from pure gaussian noise in the model's sample shape
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=generator , )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 311 | 0 |
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
lowerCAmelCase__ = re.compile(R'''([A-Z]+)([A-Z][a-z])''')
lowerCAmelCase__ = re.compile(R'''([a-z\d])([A-Z])''')
lowerCAmelCase__ = re.compile(R'''(?<!_)_(?!_)''')
lowerCAmelCase__ = re.compile(R'''(_{2,})''')
lowerCAmelCase__ = R'''^\w+(\.\w+)*$'''
lowerCAmelCase__ = R'''<>:/\|?*'''
def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case.

    NOTE(review): renamed from the shadowed ``_A`` — ``filename_prefix_for_name``
    below calls ``camelcase_to_snakecase``; duplicate parameter restored.
    """
    name = _uppercase_uppercase_re.sub(R'''\1_\2''', name)
    name = _lowercase_uppercase_re.sub(R'''\1_\2''', name)
    return name.lower()
def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case, dropping empty segments.

    NOTE(review): renamed from the shadowed ``_A``; duplicate parameter restored.
    """
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != '')
def filename_prefix_for_name(name):
    """Snake-case file prefix for a bare dataset name (rejects paths).

    NOTE(review): renamed from the shadowed ``_A`` — ``filename_prefix_for_split``
    below relies on this name.
    """
    if os.path.basename(name) != name:
        raise ValueError(F"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)
def filename_prefix_for_split(name, split):
    """File prefix ``<snake_name>-<split>`` for a dataset split.

    NOTE(review): renamed from the shadowed ``_A``; duplicate parameters restored.
    """
    if os.path.basename(name) != name:
        raise ValueError(F"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(F"Split name should match '{_split_re}'' but got '{split}'.")
    return F"{filename_prefix_for_name(name)}-{split}"
def filepattern_for_dataset_split(path, dataset_name, split, filetype_suffix=None):
    """Glob pattern matching every shard file of a dataset split.

    NOTE(review): renamed from the shadowed ``_A``; parameter names
    reconstructed from usage — confirm against the upstream module.
    """
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += F".{filetype_suffix}"
    filepath = os.path.join(path, prefix)
    return F"{filepath}*"
def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    """Concrete shard filenames for a dataset split.

    With ``shard_lengths`` given, emits ``prefix-00000-of-000NN`` style names;
    otherwise a single unsharded filename.
    NOTE(review): renamed from the shadowed ``_A``; duplicate parameters restored.
    """
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [F"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + F".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += F".{filetype_suffix}"
        return [filename]
| 104 |
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    # Assemble an UnCLIP image-variation pipeline from a pretrained
    # txt2img UnCLIP checkpoint plus a CLIP image encoder, then save it.
    parser = argparse.ArgumentParser()

    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')

    parser.add_argument(
        '''--txt2img_unclip''',
        default='''kakaobrain/karlo-v1-alpha''',
        type=str,
        required=False,
        help='''The pretrained txt2img unclip.''',
    )

    args = parser.parse_args()

    # argparse stores `--txt2img_unclip` under `args.txt2img_unclip`;
    # the original read the nonexistent `args.txtaimg_unclip`.
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
| 104 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# NOTE(review): the original bound all four module constants to the single
# name ``_lowerCamelCase``; the names below are the ones the tokenizer class
# in this module references (vocab_files_names / pretrained maps / logger).
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.model"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """google/rembert""": 256,
}
class __UpperCAmelCase(PreTrainedTokenizer):
    """
    SentencePiece-based RemBERT tokenizer.

    NOTE(review): the original block did not parse — every method was named
    ``A``, ``__init__`` repeated the parameter name ``_lowerCAmelCase``, and
    the class attributes all rebound ``__lowerCAmelCase``. Base class and
    method names are reconstructed from the ``PreTrainedTokenizer`` import at
    the top of the file and its required hooks — confirm against the
    upstream ``RemBertTokenizer``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # the SentencePiece processor is not picklable; drop it and reload on unpickle
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        # single sequence: [CLS] X [SEP]; pair: [CLS] A [SEP] B [SEP]
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    """You should not supply a second sequence if the provided sequence of """
                    """ids is already formatted with special tokens for the model.""" )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("""Vocabulary path ({}) should be a directory""".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 351 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# NOTE(review): the original bound all five module constants to the single
# name ``_lowerCamelCase``; the names below are the ones the tokenizer class
# in this module references (vocab_files_names / pretrained maps / logger /
# CONTROL_CODES).
VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
    'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'ctrl': 256,
}

# control-code prompt prefixes and their token ids
CONTROL_CODES = {
    'Pregnancy': 16_8629,
    'Christianity': 7675,
    'Explain': 10_6423,
    'Fitness': 6_3440,
    'Saving': 6_3163,
    'Ask': 2_7171,
    'Ass': 9_5985,
    'Joke': 16_3509,
    'Questions': 4_5622,
    'Thoughts': 4_9605,
    'Retail': 5_2342,
    'Feminism': 16_4338,
    'Writing': 1_1992,
    'Atheism': 19_2263,
    'Netflix': 4_8616,
    'Computing': 3_9639,
    'Opinion': 4_3213,
    'Alone': 4_4967,
    'Funny': 5_8917,
    'Gaming': 4_0358,
    'Human': 4088,
    'India': 1331,
    'Joker': 7_7138,
    'Diet': 3_6206,
    'Legal': 1_1859,
    'Norman': 4939,
    'Tip': 7_2689,
    'Weight': 5_2343,
    'Movies': 4_6273,
    'Running': 2_3425,
    'Science': 2090,
    'Horror': 3_7793,
    'Confession': 6_0572,
    'Finance': 1_2250,
    'Politics': 1_6360,
    'Scary': 19_1985,
    'Support': 1_2654,
    'Technologies': 3_2516,
    'Teenage': 6_6160,
    'Event': 3_2769,
    'Learned': 6_7460,
    'Notion': 18_2770,
    'Wikipedia': 3_7583,
    'Books': 6665,
    'Extract': 7_6050,
    'Confessions': 10_2701,
    'Conspiracy': 7_5932,
    'Links': 6_3674,
    'Narcissus': 15_0425,
    'Relationship': 5_4766,
    'Relationships': 13_4796,
    'Reviews': 4_1671,
    'News': 4256,
    'Translation': 2_6820,
    'multilingual': 12_8406,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in ``word``.

    ``word`` is a sequence of symbols (variable-length strings).
    NOTE(review): renamed from ``__a`` — the tokenizer class below calls
    ``get_pairs``; the redundant trailing ``set(...)`` re-wrap was dropped.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class __UpperCAmelCase(PreTrainedTokenizer):
    """
    CTRL BPE tokenizer (Salesforce CTRL vocabulary + merges).

    NOTE(review): the original block did not parse — methods were all named
    ``A``, ``__init__`` repeated its parameter name, the base class ``A__``
    was undefined, and ``save_vocabulary`` sorted with
    ``lambda _lowerCAmelCase: kv[1]`` (NameError). Names reconstructed from
    the ``PreTrainedTokenizer`` import and its required hooks — confirm
    against the upstream ``CTRLTokenizer``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="""utf-8""") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="""utf-8""") as merges_handle:
            merges = merges_handle.read().split("""\n""")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to a single whitespace token (memoized)."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        # mark the end-of-word symbol on the last character
        word = tuple(list(word[:-1]) + [word[-1] + """</w>"""])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # merge the lowest-ranked (earliest-learned) pair first
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("""inf""")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = """@@ """.join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        split_tokens = []

        words = re.findall(r"""\S+\n?""", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(""" """)))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = """ """.join(tokens).replace("""@@ """, """""").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        merge_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )

        with open(vocab_file, """w""", encoding="""utf-8""") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + """\n""")

        index = 0
        with open(merge_file, """w""", encoding="""utf-8""") as writer:
            writer.write("""#version: 0.2\n""")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        """ Please check that the tokenizer is not corrupted!""" )
                    index = token_index
                writer.write(""" """.join(bpe_tokens) + """\n""")
                index += 1

        return vocab_file, merge_file

    # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
    #     filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
    #     tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
    #     tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
    #     return ''.join(tokens_generated_so_far)
| 337 | 0 |
import math
import unittest
def is_prime(number):
    """Primality test via trial division over 6k ± 1 candidates.

    NOTE(review): renamed from ``__magic_name__`` — the unittest class below
    calls ``is_prime``; the body's undefined ``number`` references are
    restored by naming the parameter accordingly.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class snake_case__(unittest.TestCase):
    """Unit tests for the module's ``is_prime`` function.

    NOTE(review): the two methods both had the mangled name
    ``__UpperCAmelCase`` (so the second silently replaced the first and
    neither was collected); renamed with the ``test_`` prefix unittest
    requires. ``assertRaises`` received the undefined ``__lowerCamelCase`` —
    ``is_prime`` asserts on negative input, so ``AssertionError`` is the
    expected exception.
    """

    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0), "Zero doesn't have any positive factors, primes must have exactly two.", )
        self.assertFalse(
            is_prime(1), "One only has 1 positive factor, primes must have exactly two.", )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
| 107 | '''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __A(unittest.TestCase):
    """Fast pipeline tests for StableDiffusionUpscalePipeline.

    NOTE(review): every method was named ``_lowercase`` (later definitions
    silently replacing earlier ones) while the test bodies reference
    ``self.dummy_cond_unet_upscale`` / ``self.dummy_vae`` /
    ``self.dummy_text_encoder`` / ``self.dummy_image`` — property and test
    names are restored to those references. The undefined name ``__a`` is
    restored to the obvious local in each position (torch_device / device /
    config / None / False); ``np.uinta`` is a corruption of ``np.uint8``.
    Confirm against the upstream diffusers test file.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        # fixed RNG so the dummy image is deterministic across tests
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNetaDConditionModel(
            block_out_channels=(32, 32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=7,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=8,
            # NOTE(review): `__a` was undefined here; upstream uses True
            use_linear_projection=True,
            only_cross_attention=(True, True, False),
            num_class_embeds=100,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1E-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        return CLIPTextModel(config)

    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", )

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", return_dict=False, )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2

    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt], image=2 * [low_res_image], guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_images_per_prompt=2, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", )
        image = output.images
        assert image.shape[0] == 2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_inference_steps=2, output_type="np", ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
    # GPU integration tests for StableDiffusionUpscalePipeline against the
    # released stabilityai/stable-diffusion-x4-upscaler weights.
    # NOTE(review): mechanically renamed — all four methods share the name
    # `_lowercase` (only the last binding survives on the class), every local
    # rebinds `UpperCAmelCase_`, and `__a` placeholders stand in for arguments,
    # so `pipe`, `prompt`, `generator`, `image`, `expected_image` and
    # `mem_bytes` are unbound as written. Restore the original names first.

    def _lowercase (self : List[str] ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _lowercase (self : List[Any] ):
        # fp32 end-to-end: upscale the low-res cat photo and compare against a
        # stored numpy reference within 1e-3.
        UpperCAmelCase_ = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        UpperCAmelCase_ = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy" )
        UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler"
        UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained(__a )
        pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )
        pipe.enable_attention_slicing()
        UpperCAmelCase_ = "a cat sitting on a park bench"
        UpperCAmelCase_ = torch.manual_seed(0 )
        UpperCAmelCase_ = pipe(
            prompt=__a , image=__a , generator=__a , output_type="np" , )
        UpperCAmelCase_ = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 1E-3

    def _lowercase (self : Tuple ):
        # fp16 end-to-end: same as above with half-precision weights and a
        # looser tolerance (5e-1) against the fp16 reference output.
        UpperCAmelCase_ = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        UpperCAmelCase_ = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy" )
        UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler"
        UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained(
            __a , torch_dtype=torch.floataa , )
        pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )
        pipe.enable_attention_slicing()
        UpperCAmelCase_ = "a cat sitting on a park bench"
        UpperCAmelCase_ = torch.manual_seed(0 )
        UpperCAmelCase_ = pipe(
            prompt=__a , image=__a , generator=__a , output_type="np" , )
        UpperCAmelCase_ = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 5E-1

    def _lowercase (self : List[Any] ):
        # Memory budget: with attention slicing (slice size 1) plus sequential
        # CPU offload, the fp16 pipeline must stay under 2.9 GB of VRAM.
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        UpperCAmelCase_ = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler"
        UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained(
            __a , torch_dtype=torch.floataa , )
        pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        UpperCAmelCase_ = "a cat sitting on a park bench"
        UpperCAmelCase_ = torch.manual_seed(0 )
        UpperCAmelCase_ = pipe(
            prompt=__a , image=__a , generator=__a , num_inference_steps=5 , output_type="np" , )
        UpperCAmelCase_ = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
| 1 | 0 |
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase__ (params, i, prefix, layer_name="attention"):
    """Return the (k, o, q, v) attention kernels of layer *i* from a flat T5X param dict.

    Args:
        params: mapping of '/'-joined T5X parameter paths to weight arrays.
        i: block (layer) index.
        prefix: parameter-tree prefix, e.g. 'encoder' or 'decoder'.
        layer_name: attention sub-module name, e.g. 'attention' or 'self_attention'.

    Returns:
        Tuple (key, out, query, value) kernel arrays.
    """
    # Fix: the obfuscated original declared all four parameters under one
    # duplicated name (a SyntaxError), bound every lookup to one clobbered
    # local, and then returned the unbound names k, o, q, v. Restore distinct
    # parameter and local names; the parameter order follows the call sites.
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v
def lowerCamelCase__ (params, i, prefix, split_mlp_wi=False):
    """Return the (wi, wo) MLP kernels of layer *i* from a flat T5X param dict.

    Args:
        params: mapping of '/'-joined T5X parameter paths to weight arrays.
        i: block (layer) index.
        prefix: parameter-tree prefix, e.g. 'encoder' or 'decoder'.
        split_mlp_wi: True for v1.1 gated-GeLU checkpoints, where the input
            projection is split into wi_0 / wi_1; wi is then a 2-tuple.

    Returns:
        Tuple (wi, wo); wi is (wi_0, wi_1) when split_mlp_wi is True.
    """
    # Fix: the obfuscated original declared all four parameters under one
    # duplicated name (a SyntaxError), clobbered both lookups onto one local,
    # and built the tuple from the same unbound name twice before returning
    # the unbound names wi, wo. Restore distinct names.
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]
    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo
def lowerCamelCase__ (params, i, prefix, layer_name):
    """Return the layer-norm scale vector of layer *i* from a flat T5X param dict.

    Args:
        params: mapping of '/'-joined T5X parameter paths to weight arrays.
        i: block (layer) index.
        prefix: parameter-tree prefix, e.g. 'encoder' or 'decoder'.
        layer_name: layer-norm sub-module name, e.g. 'pre_attention_layer_norm'.
    """
    # Fix: the obfuscated original declared all four parameters under one
    # duplicated name (a SyntaxError) while the body read `prefix`, `i` and
    # `layer_name`; restore distinct parameter names matching the call sites.
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
def lowerCamelCase__ (_UpperCAmelCase , *, _UpperCAmelCase , _UpperCAmelCase):
    # Convert a flattened T5X variable tree into an ordered dict of
    # PyTorch-style parameter arrays (encoder, optional decoder, optional LM head).
    # NOTE(review): severely damaged by mechanical renaming — the three
    # parameters share one duplicated name (a SyntaxError), every assignment
    # rebinds `SCREAMING_SNAKE_CASE`, and the destination parameter names (the
    # left-hand `new[...]` targets) are lost entirely. Reads of `variables`,
    # `old`, `split_mlp_wi`, `is_encoder_only` and `new` below are unbound.
    # Restore from the upstream T5X conversion script before running; only
    # comments were added here.
    SCREAMING_SNAKE_CASE = traverse_util.flatten_dict(variables['target'])
    SCREAMING_SNAKE_CASE = {'/'.join(_UpperCAmelCase): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    SCREAMING_SNAKE_CASE = 'encoder/layers_0/mlp/wi_0/kernel' in old
    print('Split MLP:' , _UpperCAmelCase)
    SCREAMING_SNAKE_CASE = collections.OrderedDict()
    # Shared embeddings.
    SCREAMING_SNAKE_CASE = old['token_embedder/embedding']
    # Encoder.
    for i in range(_UpperCAmelCase):
        # Block i, layer 0 (Self Attention).
        SCREAMING_SNAKE_CASE = tax_layer_norm_lookup(_UpperCAmelCase , _UpperCAmelCase , 'encoder' , 'pre_attention_layer_norm')
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = tax_attention_lookup(_UpperCAmelCase , _UpperCAmelCase , 'encoder' , 'attention')
        # T5X stores kernels transposed relative to PyTorch Linear weights,
        # hence the .T on every kernel below.
        SCREAMING_SNAKE_CASE = layer_norm
        SCREAMING_SNAKE_CASE = k.T
        SCREAMING_SNAKE_CASE = o.T
        SCREAMING_SNAKE_CASE = q.T
        SCREAMING_SNAKE_CASE = v.T
        # Block i, layer 1 (MLP).
        SCREAMING_SNAKE_CASE = tax_layer_norm_lookup(_UpperCAmelCase , _UpperCAmelCase , 'encoder' , 'pre_mlp_layer_norm')
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = tax_mlp_lookup(_UpperCAmelCase , _UpperCAmelCase , 'encoder' , _UpperCAmelCase)
        SCREAMING_SNAKE_CASE = layer_norm
        if split_mlp_wi:
            SCREAMING_SNAKE_CASE = wi[0].T
            SCREAMING_SNAKE_CASE = wi[1].T
        else:
            SCREAMING_SNAKE_CASE = wi.T
        SCREAMING_SNAKE_CASE = wo.T
    # Encoder-level parameters (relative position bias and final norm).
    SCREAMING_SNAKE_CASE = old[
        'encoder/relpos_bias/rel_embedding'
    ].T
    SCREAMING_SNAKE_CASE = old['encoder/encoder_norm/scale']
    if not is_encoder_only:
        # Decoder.
        for i in range(_UpperCAmelCase):
            # Block i, layer 0 (Self Attention).
            SCREAMING_SNAKE_CASE = tax_layer_norm_lookup(_UpperCAmelCase , _UpperCAmelCase , 'decoder' , 'pre_self_attention_layer_norm')
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = tax_attention_lookup(_UpperCAmelCase , _UpperCAmelCase , 'decoder' , 'self_attention')
            SCREAMING_SNAKE_CASE = layer_norm
            SCREAMING_SNAKE_CASE = k.T
            SCREAMING_SNAKE_CASE = o.T
            SCREAMING_SNAKE_CASE = q.T
            SCREAMING_SNAKE_CASE = v.T
            # Block i, layer 1 (Cross Attention).
            SCREAMING_SNAKE_CASE = tax_layer_norm_lookup(_UpperCAmelCase , _UpperCAmelCase , 'decoder' , 'pre_cross_attention_layer_norm')
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = tax_attention_lookup(_UpperCAmelCase , _UpperCAmelCase , 'decoder' , 'encoder_decoder_attention')
            SCREAMING_SNAKE_CASE = layer_norm
            SCREAMING_SNAKE_CASE = k.T
            SCREAMING_SNAKE_CASE = o.T
            SCREAMING_SNAKE_CASE = q.T
            SCREAMING_SNAKE_CASE = v.T
            # Block i, layer 2 (MLP).
            SCREAMING_SNAKE_CASE = tax_layer_norm_lookup(_UpperCAmelCase , _UpperCAmelCase , 'decoder' , 'pre_mlp_layer_norm')
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = tax_mlp_lookup(_UpperCAmelCase , _UpperCAmelCase , 'decoder' , _UpperCAmelCase)
            SCREAMING_SNAKE_CASE = layer_norm
            if split_mlp_wi:
                SCREAMING_SNAKE_CASE = wi[0].T
                SCREAMING_SNAKE_CASE = wi[1].T
            else:
                SCREAMING_SNAKE_CASE = wi.T
            SCREAMING_SNAKE_CASE = wo.T
        # Decoder-level parameters.
        SCREAMING_SNAKE_CASE = old['decoder/decoder_norm/scale']
        SCREAMING_SNAKE_CASE = old[
            'decoder/relpos_bias/rel_embedding'
        ].T
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            SCREAMING_SNAKE_CASE = old['decoder/logits_dense/kernel'].T
    return new
def lowerCamelCase__ (converted_params, is_encoder_only):
    """Build a PyTorch state dict (tensors) from converted T5X parameter arrays.

    Args:
        converted_params: mapping of PyTorch parameter names to numpy arrays.
        is_encoder_only: when True, decoder/LM-head entries are not filled in.

    Returns:
        collections.OrderedDict mapping parameter names to torch tensors; missing
        embedding / lm_head entries are aliased to 'shared.weight'.
    """
    # Fix: the obfuscated original declared both parameters under one
    # duplicated name (a SyntaxError), clobbered the state dict onto a throwaway
    # local while later lines read the unbound `state_dict`, and lost the
    # left-hand keys of the three fill-in assignments. Those keys are restored
    # from the guarding membership tests directly above each one.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print('Using shared word embeddings as lm_head.')
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def lowerCamelCase__ (model, config, tax_checkpoint_path, is_encoder_only):
    """Load the weights of a T5X checkpoint into *model* in place.

    Args:
        model: the target PyTorch T5 model.
        config: model config providing ``num_layers``.
        tax_checkpoint_path: path to the T5X checkpoint.
        is_encoder_only: True for encoder-only checkpoints.
    """
    # Fix: the obfuscated original declared all four parameters under one
    # duplicated name (a SyntaxError) and clobbered each intermediate result,
    # leaving `config` and the converted state dict unbound. Parameter order
    # follows the downstream call site.
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    # NOTE(review): the original `strict=` argument was lost to renaming;
    # strict=True matches load_state_dict's default — confirm upstream intent.
    model.load_state_dict(state_dict, strict=True)
def lowerCamelCase__ (tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only = False):
    """Convert a T5X checkpoint into a PyTorch checkpoint on disk.

    Args:
        tax_checkpoint_path: path to the T5X checkpoint.
        config_file: JSON config describing the model architecture.
        pytorch_dump_path: output directory for the converted PyTorch model.
        is_encoder_only: build an encoder-only model instead of the seq2seq one.
    """
    # Fix: the obfuscated original declared all four parameters under one
    # duplicated name (a SyntaxError), so the reads of `config_file`,
    # `is_encoder_only`, `tax_checkpoint_path` and `pytorch_dump_path` below
    # were unbound. Parameter order follows the __main__ call site.
    config = TaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = TaEncoderModel(config)
    else:
        model = TaForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print('Done')
if __name__ == "__main__":
    # Fix: the parser and the parsed namespace were bound to the annotated
    # placeholder `a_` while later statements read `parser` / `args` (both
    # unbound), and the final call read the renamed attribute
    # `args.tax_checkpoint_path` for the `--t5x_checkpoint_path` option.
    parser = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
    # Required parameters
    parser.add_argument(
        '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
    )
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
| 327 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _snake_case ( unittest.TestCase ):
    """Integration test for the TensorFlow CamemBERT base model."""

    @slow
    def SCREAMING_SNAKE_CASE__ ( self) -> int:
        """Forward a fixed tokenized sentence through 'jplu/tf-camembert-base'
        and check the last-hidden-state shape plus a 3x3 value slice."""
        # Fix: the obfuscated original rebound every local onto one clobbered
        # name and called model(a) with `a` unbound; coherent locals restored.
        # `tf.intaa` / `tf.floataa` had their digits mangled by the renamer —
        # restored to int32 / float32 (NOTE(review): confirm against upstream).
        model = TFCamembertModel.from_pretrained('jplu/tf-camembert-base')
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.int32 , )  # J'aime le camembert !"
        output = model(input_ids)['last_hidden_state']
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape , expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.02_54, 0.02_35, 0.10_27], [0.06_06, -0.18_11, -0.04_18], [-0.15_61, -0.11_27, 0.26_87]]] , dtype=tf.float32 , )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4))
| 327 | 1 |
'''simple docstring'''
def __a(length: int = 50 ):
    """Count the tilings of a row of ``length`` unit cells using unit squares
    and tiles of length two, three and four.

    Args:
        length: length of the row to tile (default 50).

    Returns:
        The number of distinct tilings.
    """
    # Fix: the obfuscated original named its parameter SCREAMING_SNAKE_CASE_
    # while the body read `length`, and bound the table to `_lowerCAmelCase`
    # while reading `ways_number` — both NameErrors. Names restored.
    # ways_number[n] = tilings of a row of length n; rows of length 0 and 1
    # admit only the all-squares tiling, hence the initial 1s.
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            # A tile of this length may be preceded by any run of unit squares;
            # the cells after it are tiled in ways_number[remaining] ways.
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]


# Public alias: the __main__ block below referred to `solution`, which the
# rename to `__a` had left unbound.
solution = __a

if __name__ == "__main__":
    print(f'''{solution() = }''')
| 158 |
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
_SCREAMING_SNAKE_CASE = datasets.utils.logging.get_logger(__name__)
class lowerCAmelCase_ ( folder_based_builder.FolderBasedBuilderConfig ):
    # BuilderConfig for the audio folder-based dataset builder.
    # NOTE(review): renaming collapsed both option fields onto the single name
    # `__lowerCamelCase`, so the second annotation shadows the first; upstream
    # these are presumably `drop_labels` and `drop_metadata` — TODO confirm.
    __lowerCamelCase : bool = None
    __lowerCamelCase : bool = None
class lowerCAmelCase_ ( folder_based_builder.FolderBasedBuilder ):
    # Folder-based dataset builder for audio files (AudioFolder).
    # NOTE(review): all five class attributes were collapsed onto the single
    # name `__lowerCamelCase` (only the last binding survives), and
    # `AudioFolderConfig` is unbound here because the config class above also
    # lost its name to the same renaming. Upstream these correspond to the
    # builder's feature, column name, config class, extensions and the
    # audio-classification task — TODO confirm before use.
    __lowerCamelCase : Tuple = datasets.Audio()
    __lowerCamelCase : List[str] = "audio"
    __lowerCamelCase : Optional[int] = AudioFolderConfig
    __lowerCamelCase : List[str] # definition at the bottom of the script
    __lowerCamelCase : Optional[int] = AudioClassification(audio_column="audio" ,label_column="label" )
# Audio file extensions recognised by the folder-based audio builder.
_SCREAMING_SNAKE_CASE = [
    ".aiff",
    ".au",
    ".avr",
    ".caf",
    ".flac",
    ".htk",
    ".svx",
    ".mat4",
    ".mat5",
    ".mpc2k",
    ".ogg",
    ".paf",
    ".pvf",
    ".raw",
    ".rf64",
    ".sd2",
    ".sds",
    ".ircam",
    ".voc",
    ".w64",
    ".wav",
    ".nist",
    ".wavex",
    ".wve",
    ".xi",
    ".mp3",
    ".opus",
]
# NOTE(review): this rebinding clobbers the list above (same obfuscated name)
# and reads `AUDIO_EXTENSIONS`, which is unbound here; the two statements
# presumably bound `AUDIO_EXTENSIONS` and then the builder's `EXTENSIONS`
# attribute — confirm against the upstream audiofolder module before running.
_SCREAMING_SNAKE_CASE = AUDIO_EXTENSIONS
| 158 | 1 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class __lowerCAmelCase ( unittest.TestCase ):
    # Test helper that builds a small random AlbertConfig plus input tensors.
    # NOTE(review): mechanically renamed — __init__ declares every parameter
    # under the single duplicated name `__magic_name__` (a SyntaxError) and
    # every `self.<attr> = ...` assignment became a rebinding of the local
    # `snake_case_`, so the `self.*` reads in the methods below are unbound
    # attributes. Restore the original parameter/attribute names before use.
    def __init__(self , __magic_name__ , __magic_name__=13 , __magic_name__=7 , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=99 , __magic_name__=32 , __magic_name__=5 , __magic_name__=4 , __magic_name__=37 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=512 , __magic_name__=16 , __magic_name__=2 , __magic_name__=0.02 , __magic_name__=4 , ) -> Tuple:
        """Store the tester's hyper-parameters (batch size, sequence length,
        model sizes, dropout rates, etc.)."""
        snake_case_ : Union[str, Any] = parent
        snake_case_ : Optional[Any] = batch_size
        snake_case_ : List[Any] = seq_length
        snake_case_ : Tuple = is_training
        snake_case_ : List[str] = use_attention_mask
        snake_case_ : Any = use_token_type_ids
        snake_case_ : Dict = use_labels
        snake_case_ : Optional[Any] = vocab_size
        snake_case_ : Dict = hidden_size
        snake_case_ : List[Any] = num_hidden_layers
        snake_case_ : Union[str, Any] = num_attention_heads
        snake_case_ : Any = intermediate_size
        snake_case_ : Optional[int] = hidden_act
        snake_case_ : Optional[int] = hidden_dropout_prob
        snake_case_ : Optional[Any] = attention_probs_dropout_prob
        snake_case_ : Optional[int] = max_position_embeddings
        snake_case_ : Optional[int] = type_vocab_size
        snake_case_ : List[Any] = type_sequence_label_size
        snake_case_ : Dict = initializer_range
        snake_case_ : Dict = num_choices

    def lowerCamelCase (self ) -> Tuple:
        """Build (config, input_ids, token_type_ids, attention_mask) from
        random tensors sized by the stored hyper-parameters."""
        snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        snake_case_ : Any = None
        if self.use_attention_mask:
            snake_case_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
        snake_case_ : List[Any] = None
        if self.use_token_type_ids:
            snake_case_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        snake_case_ : List[Any] = AlbertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask

    def lowerCamelCase (self ) -> List[str]:
        """Repackage prepare_config_and_inputs() into (config, inputs_dict)
        for the common model tests."""
        snake_case_ : Tuple = self.prepare_config_and_inputs()
        snake_case_ , snake_case_ , snake_case_ , snake_case_ : Optional[int] = config_and_inputs
        snake_case_ : int = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
@require_flax
class __lowerCAmelCase ( _a, unittest.TestCase ):
    # Model-level tests for the Flax ALBERT family.
    # NOTE(review): the mixin base `_a`, the helper `FlaxAlbertModelTester`,
    # and the assertion target `__magic_name__` are all unbound here — the
    # renamer destroyed them. Both test methods also share the name
    # `lowerCamelCase` (the second shadows the first), and
    # `FlaxAlbertForQuestionAnswering` appears twice in the class tuple.
    # Restore the original names before running.
    lowerCamelCase_ : Optional[int] = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def lowerCamelCase (self ) -> List[str]:
        """Create the shared model tester used by the mixin's tests."""
        snake_case_ : Optional[Any] = FlaxAlbertModelTester(self )

    @slow
    def lowerCamelCase (self ) -> Tuple:
        """Load every model class from 'albert-base-v2' and run one forward pass."""
        for model_class_name in self.all_model_classes:
            snake_case_ : Dict = model_class_name.from_pretrained('''albert-base-v2''' )
            snake_case_ : Union[str, Any] = model(np.ones((1, 1) ) )
            self.assertIsNotNone(__magic_name__ )
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
    # Integration test: compare a hidden-state slice of the pretrained
    # 'albert-base-v2' Flax model against hard-coded reference values.
    # NOTE(review): locals rebind `snake_case_` and calls pass the unbound
    # placeholder `__magic_name__`; restore the original names (model,
    # input_ids, attention_mask, output, expected shape/slice) before running.
    @slow
    def lowerCamelCase (self ) -> Optional[Any]:
        """Forward a fixed input through 'albert-base-v2' and check shape
        plus a 3x3 output slice."""
        snake_case_ : Optional[Any] = FlaxAlbertModel.from_pretrained('''albert-base-v2''' )
        snake_case_ : Optional[int] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        snake_case_ : Dict = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        snake_case_ : Union[str, Any] = model(__magic_name__ , attention_mask=__magic_name__ )[0]
        snake_case_ : Tuple = (1, 11, 768)
        self.assertEqual(output.shape , __magic_name__ )
        snake_case_ : str = np.array(
            [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __magic_name__ , atol=1e-4 ) )
| 279 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class __lowerCAmelCase ( unittest.TestCase ):
    # Test helper that builds a small random AlbertConfig plus input tensors.
    # (This fragment duplicates the tester defined earlier in the file.)
    # NOTE(review): mechanically renamed — __init__ declares every parameter
    # under the single duplicated name `__magic_name__` (a SyntaxError) and
    # every `self.<attr> = ...` assignment became a rebinding of the local
    # `snake_case_`, so the `self.*` reads in the methods below are unbound
    # attributes. Restore the original parameter/attribute names before use.
    def __init__(self , __magic_name__ , __magic_name__=13 , __magic_name__=7 , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=99 , __magic_name__=32 , __magic_name__=5 , __magic_name__=4 , __magic_name__=37 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=512 , __magic_name__=16 , __magic_name__=2 , __magic_name__=0.02 , __magic_name__=4 , ) -> Tuple:
        """Store the tester's hyper-parameters (batch size, sequence length,
        model sizes, dropout rates, etc.)."""
        snake_case_ : Union[str, Any] = parent
        snake_case_ : Optional[Any] = batch_size
        snake_case_ : List[Any] = seq_length
        snake_case_ : Tuple = is_training
        snake_case_ : List[str] = use_attention_mask
        snake_case_ : Any = use_token_type_ids
        snake_case_ : Dict = use_labels
        snake_case_ : Optional[Any] = vocab_size
        snake_case_ : Dict = hidden_size
        snake_case_ : List[Any] = num_hidden_layers
        snake_case_ : Union[str, Any] = num_attention_heads
        snake_case_ : Any = intermediate_size
        snake_case_ : Optional[int] = hidden_act
        snake_case_ : Optional[int] = hidden_dropout_prob
        snake_case_ : Optional[Any] = attention_probs_dropout_prob
        snake_case_ : Optional[int] = max_position_embeddings
        snake_case_ : Optional[int] = type_vocab_size
        snake_case_ : List[Any] = type_sequence_label_size
        snake_case_ : Dict = initializer_range
        snake_case_ : Dict = num_choices

    def lowerCamelCase (self ) -> Tuple:
        """Build (config, input_ids, token_type_ids, attention_mask) from
        random tensors sized by the stored hyper-parameters."""
        snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        snake_case_ : Any = None
        if self.use_attention_mask:
            snake_case_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
        snake_case_ : List[Any] = None
        if self.use_token_type_ids:
            snake_case_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        snake_case_ : List[Any] = AlbertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask

    def lowerCamelCase (self ) -> List[str]:
        """Repackage prepare_config_and_inputs() into (config, inputs_dict)
        for the common model tests."""
        snake_case_ : Tuple = self.prepare_config_and_inputs()
        snake_case_ , snake_case_ , snake_case_ , snake_case_ : Optional[int] = config_and_inputs
        snake_case_ : int = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
@require_flax
class __lowerCAmelCase ( _a, unittest.TestCase ):
    # Model-level tests for the Flax ALBERT family (duplicate of the earlier
    # fragment in this file).
    # NOTE(review): the mixin base `_a`, the helper `FlaxAlbertModelTester`,
    # and the assertion target `__magic_name__` are all unbound here — the
    # renamer destroyed them. Both test methods also share the name
    # `lowerCamelCase` (the second shadows the first), and
    # `FlaxAlbertForQuestionAnswering` appears twice in the class tuple.
    # Restore the original names before running.
    lowerCamelCase_ : Optional[int] = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def lowerCamelCase (self ) -> List[str]:
        """Create the shared model tester used by the mixin's tests."""
        snake_case_ : Optional[Any] = FlaxAlbertModelTester(self )

    @slow
    def lowerCamelCase (self ) -> Tuple:
        """Load every model class from 'albert-base-v2' and run one forward pass."""
        for model_class_name in self.all_model_classes:
            snake_case_ : Dict = model_class_name.from_pretrained('''albert-base-v2''' )
            snake_case_ : Union[str, Any] = model(np.ones((1, 1) ) )
            self.assertIsNotNone(__magic_name__ )
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
    # Integration test: compare a hidden-state slice of the pretrained
    # 'albert-base-v2' Flax model against hard-coded reference values
    # (duplicate of the earlier fragment in this file).
    # NOTE(review): locals rebind `snake_case_` and calls pass the unbound
    # placeholder `__magic_name__`; restore the original names before running.
    @slow
    def lowerCamelCase (self ) -> Optional[Any]:
        """Forward a fixed input through 'albert-base-v2' and check shape
        plus a 3x3 output slice."""
        snake_case_ : Optional[Any] = FlaxAlbertModel.from_pretrained('''albert-base-v2''' )
        snake_case_ : Optional[int] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        snake_case_ : Dict = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        snake_case_ : Union[str, Any] = model(__magic_name__ , attention_mask=__magic_name__ )[0]
        snake_case_ : Tuple = (1, 11, 768)
        self.assertEqual(output.shape , __magic_name__ )
        snake_case_ : str = np.array(
            [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __magic_name__ , atol=1e-4 ) )
| 279 | 1 |
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase__ ( __lowercase , unittest.TestCase):
    """Tokenizer tests for BloomTokenizerFast ('bigscience/tokenizer')."""

    # NOTE(review): mechanically renamed — the mixin base `__lowercase` and
    # every call argument `__lowerCAmelCase` are unbound placeholders, locals
    # rebind `__UpperCamelCase`, and all seven class attributes were collapsed
    # onto the single name `_A` (only the last binding survives). Upstream
    # these are the TokenizerTesterMixin knobs (tokenizer classes, test flags,
    # from-pretrained key, special-token map) — restore before running.
    _A = None
    _A = BloomTokenizerFast
    _A = BloomTokenizerFast
    _A = True
    _A = False
    _A = 'tokenizer_file'
    _A = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'}

    def _lowerCamelCase ( self :List[str] ) -> List[Any]:
        # Download the reference tokenizer once and cache it in tmpdirname.
        super().setUp()
        __UpperCamelCase : Tuple = BloomTokenizerFast.from_pretrained("bigscience/tokenizer" )
        tokenizer.save_pretrained(self.tmpdirname )

    def _lowerCamelCase ( self :str , **a :List[str] ) -> str:
        # Rebuild a tokenizer from the cached files, merging special tokens.
        kwargs.update(self.special_tokens_map )
        return BloomTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase )

    def _lowerCamelCase ( self :int ) -> Dict:
        # Round-trip: encode two sentences, compare ids, then decode back.
        __UpperCamelCase : List[Any] = self.get_rust_tokenizer()
        __UpperCamelCase : str = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        __UpperCamelCase : int = [[2_1_7_5, 2_3_7_1_4, 7_3_1_7_3, 1_4_4_2_5_2, 2], [7_7, 1_3_2_6_1_9, 3_4_7_8, 3_6_8, 1_0_9_5_8_6, 3_5_4_3_3, 2]]
        __UpperCamelCase : Tuple = tokenizer.batch_encode_plus(__lowerCAmelCase )["input_ids"]
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
        __UpperCamelCase : int = tokenizer.batch_decode(__lowerCAmelCase )
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )

    def _lowerCamelCase ( self :Optional[int] , a :Optional[int]=6 ) -> Dict:
        # Padding behaviour: encoding with max_length must not raise, and once
        # the pad token is removed, padding='max_length' must raise ValueError.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                __UpperCamelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                __UpperCamelCase : Union[str, Any] = "This is a simple input"
                __UpperCamelCase : Any = ["This is a simple input 1", "This is a simple input 2"]
                __UpperCamelCase : int = ("This is a simple input", "This is a pair")
                __UpperCamelCase : Dict = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(__lowerCAmelCase , max_length=__lowerCAmelCase )
                    tokenizer_r.encode_plus(__lowerCAmelCase , max_length=__lowerCAmelCase )
                    tokenizer_r.batch_encode_plus(__lowerCAmelCase , max_length=__lowerCAmelCase )
                    tokenizer_r.encode(__lowerCAmelCase , max_length=__lowerCAmelCase )
                    tokenizer_r.batch_encode_plus(__lowerCAmelCase , max_length=__lowerCAmelCase )
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding" )
                __UpperCamelCase : Optional[int] = None # Hotfixing padding = None
                self.assertRaises(__lowerCAmelCase , tokenizer_r.encode , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="max_length" )
                # Simple input
                self.assertRaises(__lowerCAmelCase , tokenizer_r.encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="max_length" )
                # Simple input
                self.assertRaises(
                    __lowerCAmelCase , tokenizer_r.batch_encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="max_length" , )
                # Pair input
                self.assertRaises(__lowerCAmelCase , tokenizer_r.encode , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="max_length" )
                # Pair input
                self.assertRaises(__lowerCAmelCase , tokenizer_r.encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="max_length" )
                # Pair input
                self.assertRaises(
                    __lowerCAmelCase , tokenizer_r.batch_encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="max_length" , )

    def _lowerCamelCase ( self :Optional[int] ) -> Dict:
        # Tokenize one multilingual XNLI sample and check decode round-trips.
        __UpperCamelCase : str = self.get_rust_tokenizer()
        __UpperCamelCase : Optional[int] = load_dataset("xnli" , "all_languages" , split="test" , streaming=__lowerCAmelCase )
        __UpperCamelCase : str = next(iter(__lowerCAmelCase ) )["premise"] # pick up one data
        __UpperCamelCase : Any = list(sample_data.values() )
        __UpperCamelCase : List[str] = list(map(tokenizer.encode , __lowerCAmelCase ) )
        __UpperCamelCase : int = [tokenizer.decode(__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase ) for x in output_tokens]
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )

    def _lowerCamelCase ( self :str ) -> Optional[Any]:
        # The pretrained map must expose at least one vocab file entry.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
def lowerCAmelCase__(word: str) -> str:
    """Convert lowercase ASCII letters in *word* to uppercase, leaving every
    other character untouched.

    >>> lowerCAmelCase__("hello world")
    'HELLO WORLD'
    >>> lowerCAmelCase__("Abc!")
    'ABC!'
    """
    # Fix: the obfuscated original took its argument as `__snake_case` yet
    # iterated the unbound name `word`, and shifted ord() of the whole string
    # instead of the current character (a TypeError for multi-char input).
    # Lowercase ASCII letters sit exactly 32 code points above their
    # uppercase counterparts.
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 209 | 0 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
# NOTE(review): the assignment below rebinds the same obfuscated name and so
# clobbers the logger created above; the two statements originally had
# distinct names (presumably a module logger and a pretrained-config archive
# map) — restore them before use. TODO confirm against the upstream module.
# Map of canonical T5 checkpoint names to their hosted config files.
lowercase_ = {
    """t5-small""": """https://huggingface.co/t5-small/resolve/main/config.json""",
    """t5-base""": """https://huggingface.co/t5-base/resolve/main/config.json""",
    """t5-large""": """https://huggingface.co/t5-large/resolve/main/config.json""",
    """t5-3b""": """https://huggingface.co/t5-3b/resolve/main/config.json""",
    """t5-11b""": """https://huggingface.co/t5-11b/resolve/main/config.json""",
}
class _snake_case ( lowercase__):
    # Configuration class for T5 models (model_type 't5').
    # NOTE(review): mechanically renamed — every keyword parameter of __init__
    # is declared under the single duplicated name `__lowercase` (a
    # SyntaxError), and each `self.<attr> = ...` assignment became a rebinding
    # of the local `lowercase__` (which also collides with the unbound
    # base-class name), so the reads of `vocab_size`, `d_model`, ...,
    # `self.feed_forward_proj` and `act_info` below are broken. Restore the
    # original parameter/attribute names before use.
    UpperCamelCase__ : List[Any] ="""t5"""
    UpperCamelCase__ : Any =["""past_key_values"""]
    UpperCamelCase__ : Union[str, Any] ={"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}

    def __init__( self : Dict, __lowercase : Union[str, Any]=3_2128, __lowercase : Optional[Any]=512, __lowercase : List[Any]=64, __lowercase : int=2048, __lowercase : Dict=6, __lowercase : Tuple=None, __lowercase : List[str]=8, __lowercase : int=32, __lowercase : str=128, __lowercase : Optional[Any]=0.1, __lowercase : Tuple=1e-6, __lowercase : Dict=1.0, __lowercase : Optional[int]="relu", __lowercase : List[Any]=True, __lowercase : Optional[int]=True, __lowercase : Any=0, __lowercase : List[Any]=1, **__lowercase : Dict, ):
        """Store the T5 hyper-parameters and validate feed_forward_proj."""
        lowercase__ = vocab_size
        lowercase__ = d_model
        lowercase__ = d_kv
        lowercase__ = d_ff
        lowercase__ = num_layers
        lowercase__ = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        ) # default = symmetry
        lowercase__ = num_heads
        lowercase__ = relative_attention_num_buckets
        lowercase__ = relative_attention_max_distance
        lowercase__ = dropout_rate
        lowercase__ = layer_norm_epsilon
        lowercase__ = initializer_factor
        lowercase__ = feed_forward_proj
        lowercase__ = use_cache
        # feed_forward_proj is either '<act>' or 'gated-<act>'.
        lowercase__ = self.feed_forward_proj.split("-" )
        lowercase__ = act_info[-1]
        lowercase__ = act_info[0] == "gated"
        if len(__lowercase ) > 1 and act_info[0] != "gated" or len(__lowercase ) > 2:
            raise ValueError(
                F'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'" )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            lowercase__ = "gelu_new"
        super().__init__(
            pad_token_id=__lowercase, eos_token_id=__lowercase, is_encoder_decoder=__lowercase, **__lowercase, )
class _snake_case ( lowercase__):
    # ONNX export configuration for T5 (seq2seq with past-key-values support).
    # NOTE(review): mechanically renamed — the base-class name and the dict
    # under construction both collapse onto `lowercase__`, so `common_inputs`
    # (returned below) is never bound and the nested axis-dict writes are lost.
    # Both properties also share the name `A__` (the second shadows the
    # first). Restore the original names before use.
    @property
    def A__ ( self : List[Any] ):
        # Declare the dynamic axes of the ONNX inputs; the decoder axes depend
        # on whether past key values are used.
        lowercase__ = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            lowercase__ = "past_encoder_sequence + sequence"
            lowercase__ = {0: "batch"}
            lowercase__ = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            lowercase__ = {0: "batch", 1: "decoder_sequence"}
            lowercase__ = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(__lowercase, direction="inputs" )
        return common_inputs

    @property
    def A__ ( self : Tuple ):
        # Fixed integer constant — presumably the default ONNX opset; confirm.
        return 13
| 224 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def __lowerCAmelCase():
    """Regression test: kruskal() must return the known MST of a fixed 9-node graph."""
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes, edges)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    # Edge order within an MST is implementation-defined; compare sorted.
    assert sorted(result) == sorted(expected)
| 224 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer test-suite for LED (slow and fast implementations)."""

    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        # Write a minimal BPE vocab/merges pair into the mixin's temp dir so
        # from_pretrained() can load a toy tokenizer.
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
            "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1_024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5_122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        # Not applicable to LED's byte-level BPE tokenizer.
        pass

    def test_embeded_special_tokens(self):
        # Slow and fast tokenizers must agree on special-token handling.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 66 |
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
# Root logging setup for the export script: timestamped records to stdout,
# level taken from the LOGLEVEL environment variable (default INFO).
logging.basicConfig(
    format="""%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s""",
    datefmt="""%Y-%m-%d %H:%M:%S""",
    level=os.environ.get("""LOGLEVEL""", """INFO""").upper(),
    stream=sys.stdout,
)

logger = logging.getLogger(__name__)

# Registries mapping a supported checkpoint name to its model / tokenizer class.
model_dict = {"""facebook/bart-base""": BartForConditionalGeneration}
tokenizer_dict = {"""facebook/bart-base""": BartTokenizer}
def parse_args():
    """Parse the CLI flags for the BART + beam-search ONNX export script."""
    parser = argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""")
    parser.add_argument(
        """--validation_file""", type=str, default=None, help="""A csv or a json file containing the validation data."""
    )
    parser.add_argument(
        """--max_length""",
        type=int,
        default=5,
        help="""The maximum total input sequence length after tokenization.""",
    )
    parser.add_argument(
        """--num_beams""",
        type=int,
        default=None,
        help=(
            """Number of beams to use for evaluation. This argument will be """
            """passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."""
        ),
    )
    parser.add_argument(
        """--model_name_or_path""",
        type=str,
        help="""Path to pretrained model or model identifier from huggingface.co/models.""",
        required=True,
    )
    parser.add_argument(
        """--config_name""",
        type=str,
        default=None,
        help="""Pretrained config name or path if not the same as model_name""",
    )
    parser.add_argument(
        """--device""",
        type=str,
        default="""cpu""",
        help="""Device where the model will be run""",
    )
    parser.add_argument("""--output_file_path""", type=str, default=None, help="""Where to store the final ONNX file.""")

    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device="cpu"):
    """Load the registered model/tokenizer pair for `model_name`; move the model to `device`.

    Returns:
        (model, tokenizer) tuple.
    """
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        # Neutralize generation defaults that interfere with the scripted
        # beam-search export (values taken from the original script).
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    """Export `model` wrapped in a scripted beam-search generator to ONNX, then
    check that ONNX Runtime reproduces the PyTorch `generate()` output."""
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = """My friends are cool but they eat too many carbs."""
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1_0_2_4, return_tensors="""pt""").to(model.device)

        # Reference output from eager PyTorch generation.
        summary_ids = model.generate(
            inputs["""input_ids"""],
            attention_mask=inputs["""attention_mask"""],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["""input_ids"""],
                inputs["""attention_mask"""],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=1_4,
            input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""],
            output_names=["""output_ids"""],
            dynamic_axes={
                """input_ids""": {0: """batch""", 1: """seq"""},
                """output_ids""": {0: """batch""", 1: """seq_out"""},
            },
            example_outputs=summary_ids,
        )

        logger.info("""Model exported to {}""".format(onnx_file_path))

        # Deduplicate initializers to shrink the exported graph.
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))
        logger.info("""Deduplicated and optimized model written to {}""".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                """input_ids""": inputs["""input_ids"""].cpu().numpy(),
                """attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
                """num_beams""": np.array(num_beams),
                """max_length""": np.array(max_length),
                """decoder_start_token_id""": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)
        logger.info("""Model outputs from torch and ONNX Runtime are similar.""")
        logger.info("""Success.""")
def main():
    """CLI entry point: parse args, load the model, export and validate as ONNX."""
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",
        datefmt="""%m/%d/%Y %H:%M:%S""",
        level=logging.INFO,
    )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""")
    model.to(device)

    # CLI overrides for generation parameters.
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = """BART.onnx"""

    logger.info("""Exporting model to ONNX""")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)


if __name__ == "__main__":
    main()
| 248 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Canonical config URLs for released BEiT checkpoints.
BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/beit-base-patch16-224-pt22k': (
        'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}
class __SCREAMING_SNAKE_CASE(PretrainedConfig):
    """Configuration for a BEiT model (vision backbone plus optional
    semantic-segmentation decode/auxiliary heads)."""

    model_type = """beit"""

    def __init__(
        self,
        vocab_size=81_92,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=2_24,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=2_56,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=2_55,
        **kwargs,
    ):
        """All defaults match the mangled original's positional defaults."""
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig):
    """ONNX export settings for BEiT image models.

    Renamed from the mangled duplicate class name, which shadowed the config
    class defined directly above it in this module.
    """

    torch_onnx_minimum_version = version.parse("""1.11""")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Single pixel_values input with symbolic batch/channel/spatial axes."""
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance when validating the exported graph."""
        return 1e-4
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# Names of the on-disk vocabulary artifacts; the tokenizer class below
# references these module-level constants.
VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
    'tokenizer_config_file': 'tokenizer_config.json',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
    },
    'merges_file': {
        'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
    },
    'tokenizer_config_file': {
        'facebook/blenderbot_small-90M': (
            'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/blenderbot_small-90M': 5_1_2}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a sequence of symbols).

    Renamed from the mangled `_a`: the tokenizer class below calls `get_pairs`.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class __SCREAMING_SNAKE_CASE(PreTrainedTokenizer):
    """BPE tokenizer for BlenderbotSmall (word-level BPE with '@@' continuation
    markers and a '</w>' end-of-word sentinel)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        """Load the JSON vocab and the BPE merge ranks from disk."""
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding='''utf-8''') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding='''utf-8''') as merges_handle:
            # First line is the "#version" header; last entry is an empty line.
            merges = merges_handle.read().split('''\n''')[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        """Size of the base vocabulary (excluding added tokens)."""
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        """Full vocab mapping, including tokens added after loading."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        """Apply BPE to one whitespace token; returns '@@ '-joined subwords."""
        if token in self.cache:
            return self.cache[token]
        # Normalize punctuation/apostrophes/whitespace before splitting.
        token = re.sub('''([.,!?()])''', r''' \1''', token)
        token = re.sub('''(\')''', r''' \1 ''', token)
        token = re.sub(r'''\s{2,}''', ''' ''', token)
        if "\n" in token:
            token = token.replace('''\n''', ''' __newln__''')

        tokens = token.split(''' ''')
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + '''</w>'''])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                # Merge the lowest-ranked (most frequent) bigram first.
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('''inf''')))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = '''@@ '''.join(word)
            # Drop the trailing "</w>" sentinel.
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        """Whitespace-split `text` and BPE-encode each piece."""
        split_tokens = []
        words = re.findall(r'''\S+\n?''', text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(''' ''')))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Lowercase `token` and look it up, falling back to the unk id."""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Map an id back to its token, falling back to the unk token."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Undo BPE: join tokens and strip '@@ ' continuation markers."""
        out_string = ''' '''.join(tokens).replace('''@@ ''', '''''').strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file''']
        )

        with open(vocab_file, '''w''', encoding='''utf-8''') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '''\n''')

        index = 0
        with open(merge_file, '''w''', encoding='''utf-8''') as writer:
            writer.write('''#version: 0.2\n''')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        ''' Please check that the tokenizer is not corrupted!'''
                    )
                    index = token_index
                writer.write(''' '''.join(bpe_tokens) + '''\n''')
                index += 1

        return vocab_file, merge_file
"""Optional `rich` integration: install rich's pretty traceback handler when
the library is available, otherwise fail with an install hint."""
from .imports import is_rich_available

if is_rich_available():
    from rich.traceback import install

    # show_locals=False keeps rendered tracebacks free of local-variable dumps.
    install(show_locals=False)
else:
    raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
| 166 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowerCamelCase = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def get_user_input():
    """Ask which compute environment is in use and run the matching questionnaire.

    Returns:
        The config object produced by the SageMaker or cluster questionnaire.
    """
    compute_environment = _ask_options(
        'In which compute environment are you running?',
        ['This machine', 'AWS (Amazon SageMaker)'],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    """Build (or register on `subparsers`) the `accelerate config` argument parser."""
    if subparsers is not None:
        # `lowerCamelCase` is this module's description constant.
        parser = subparsers.add_parser('config', description=lowerCamelCase)
    else:
        parser = argparse.ArgumentParser('Accelerate config command', description=lowerCamelCase)

    parser.add_argument(
        '--config_file',
        default=None,
        help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ),
    )

    if subparsers is not None:
        # Dispatch `accelerate config ...` to the command handler.
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    """Run the interactive questionnaire and persist the resulting config file."""
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        # Default location lives under the accelerate cache directory.
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    # Serialize as JSON or YAML based on the chosen file extension.
    if config_file.endswith('.json'):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"""accelerate configuration saved at {config_file}""")
def main():
    """Entry point for `accelerate config` when invoked as a script."""
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
| 166 | 1 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# On-disk vocabulary artifact names; the tokenizer class below references
# these module-level constants.
VOCAB_FILES_NAMES = {
    '''vocab_file''': '''vocab.txt''',
    '''merges_file''': '''bpe.codes''',
}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt''',
        '''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt''',
    },
    '''merges_file''': {
        '''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes''',
        '''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes''',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''vinai/phobert-base''': 256,
    '''vinai/phobert-large''': 256,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a sequence of symbols).

    Renamed from the mangled `__lowerCamelCase`: the tokenizer's `bpe` method
    calls `get_pairs`.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class __UpperCAmelCase (snake_case_ ):
__snake_case : Optional[Any] = VOCAB_FILES_NAMES
__snake_case : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__snake_case : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self: List[Any] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Dict="<s>" , UpperCAmelCase_: Optional[Any]="</s>" , UpperCAmelCase_: Dict="</s>" , UpperCAmelCase_: Dict="<s>" , UpperCAmelCase_: Dict="<unk>" , UpperCAmelCase_: List[Any]="<pad>" , UpperCAmelCase_: Optional[Any]="<mask>" , **UpperCAmelCase_: str , ):
'''simple docstring'''
super().__init__(
bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , **UpperCAmelCase_ , )
_SCREAMING_SNAKE_CASE = vocab_file
_SCREAMING_SNAKE_CASE = merges_file
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = 1
_SCREAMING_SNAKE_CASE = 2
_SCREAMING_SNAKE_CASE = 3
self.add_from_file(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()}
with open(UpperCAmelCase_ , encoding="""utf-8""" ) as merges_handle:
_SCREAMING_SNAKE_CASE = merges_handle.read().split("""\n""" )[:-1]
_SCREAMING_SNAKE_CASE = [tuple(merge.split()[:-1] ) for merge in merges]
_SCREAMING_SNAKE_CASE = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
_SCREAMING_SNAKE_CASE = {}
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: List[int] , UpperCAmelCase_: Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_SCREAMING_SNAKE_CASE = [self.cls_token_id]
_SCREAMING_SNAKE_CASE = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase ( self: Dict , UpperCAmelCase_: List[int] , UpperCAmelCase_: Optional[List[int]] = None , UpperCAmelCase_: bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase_ )) + [1]
return [1] + ([0] * len(UpperCAmelCase_ )) + [1, 1] + ([0] * len(UpperCAmelCase_ )) + [1]
def UpperCamelCase ( self: Dict , UpperCAmelCase_: List[int] , UpperCAmelCase_: Optional[List[int]] = None ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = [self.sep_token_id]
_SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCamelCase ( self: str ):
'''simple docstring'''
return len(self.encoder )
def UpperCamelCase ( self: int ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase ( self: List[str] , UpperCAmelCase_: Tuple ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
_SCREAMING_SNAKE_CASE = tuple(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
_SCREAMING_SNAKE_CASE = get_pairs(UpperCAmelCase_ )
if not pairs:
return token
while True:
_SCREAMING_SNAKE_CASE = min(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : self.bpe_ranks.get(UpperCAmelCase_ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
_SCREAMING_SNAKE_CASE = bigram
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = 0
while i < len(UpperCAmelCase_ ):
try:
_SCREAMING_SNAKE_CASE = word.index(UpperCAmelCase_ , UpperCAmelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_SCREAMING_SNAKE_CASE = j
if word[i] == first and i < len(UpperCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_SCREAMING_SNAKE_CASE = tuple(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = new_word
if len(UpperCAmelCase_ ) == 1:
break
else:
_SCREAMING_SNAKE_CASE = get_pairs(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = '@@ '.join(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = word[:-4]
_SCREAMING_SNAKE_CASE = word
return word
def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = re.findall(R"""\S+\n?""" , UpperCAmelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(UpperCAmelCase_ ).split(""" """ ) ) )
return split_tokens
def UpperCamelCase(self, UpperCAmelCase_):
    """Map a token string to its id; unknown tokens fall back to the unk token's id."""
    if UpperCAmelCase_ in self.encoder:
        return self.encoder[UpperCAmelCase_]
    return self.encoder.get(self.unk_token)
def UpperCamelCase(self, UpperCAmelCase_):
    """Map an id back to its token string, or the unk token for unknown ids."""
    if UpperCAmelCase_ in self.decoder:
        return self.decoder[UpperCAmelCase_]
    return self.unk_token
def UpperCamelCase(self, UpperCAmelCase_):
    """Join BPE sub-tokens into a single string, removing the '@@ ' continuation markers.

    Fix over the degraded original: the joined string was assigned to a collapsed
    local name while the return statement referenced the unbound ``out_string``.
    """
    out_string = " ".join(UpperCAmelCase_).replace("@@ ", "").strip()
    return out_string
def UpperCamelCase(self, UpperCAmelCase_, filename_prefix=None):
    """Copy the vocab and merges files into ``UpperCAmelCase_`` (a directory).

    Returns the ``(vocab_path, merges_path)`` tuple, or ``None`` when the target
    is not a directory.

    Fixes over the degraded original: both parameters shared one name (a
    SyntaxError), the error message referenced the unbound ``save_directory``,
    and the two output paths were assigned to a collapsed local name while the
    copy/return statements referenced ``out_vocab_file``/``out_merge_file``.
    """
    if not os.path.isdir(UpperCAmelCase_):
        logger.error(f'Vocabulary path ({UpperCAmelCase_}) should be a directory')
        return
    out_vocab_file = os.path.join(
        UpperCAmelCase_, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
    out_merge_file = os.path.join(
        UpperCAmelCase_, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
    # Only copy when source and destination differ, to avoid clobbering in place.
    if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
        copyfile(self.vocab_file, out_vocab_file)
    if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
        copyfile(self.merges_file, out_merge_file)
    return out_vocab_file, out_merge_file
def UpperCamelCase ( self: List[str] , UpperCAmelCase_: Optional[int] ):
    # Extends the vocabulary from a fairseq-style "<token> <count>" dictionary,
    # given either a path or an already-open file object.
    # NOTE(review): this method is truncated in this chunk — the statement that
    # actually inserts the parsed word into the encoder is missing below.
    '''simple docstring'''
    if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
        # NOTE(review): `isinstance(x, x)` cannot be intended — presumably a
        # check that the argument is a path string; confirm upstream.
        try:
            with open(UpperCAmelCase_ , """r""" , encoding="""utf-8""" ) as fd:
                # NOTE(review): recursing with the path instead of the open
                # handle `fd` looks wrong — confirm against the original.
                self.add_from_file(UpperCAmelCase_ )
        except FileNotFoundError as fnfe:
            raise fnfe
        except UnicodeError:
            # NOTE(review): `f` is unbound in this message's f-string.
            raise Exception(F'Incorrect encoding detected in {f}, please rebuild the dataset' )
        return
    # NOTE(review): `f`, `lines`, `line` and `idx` below are unbound — every
    # assignment was collapsed onto one placeholder name.
    _SCREAMING_SNAKE_CASE = f.readlines()
    for lineTmp in lines:
        _SCREAMING_SNAKE_CASE = lineTmp.strip()
        _SCREAMING_SNAKE_CASE = line.rfind(""" """ )
        if idx == -1:
            raise ValueError("""Incorrect dictionary format, expected \'<token> <cnt>\'""" )
        _SCREAMING_SNAKE_CASE = line[:idx]
        _SCREAMING_SNAKE_CASE = len(self.encoder )
| 356 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCAmelCase(_UpperCAmelCase, unittest.TestCase):
    """Tokenizer test-suite for OpenAI GPT (slow and fast implementations).

    Fixes over the degraded original: the four mixin configuration attributes
    and all test methods had been collapsed onto single names (shadowing each
    other and hiding them from unittest discovery), locals were unbound, and
    the expected ``ValueError`` in the padding tests had been lost.
    """

    # Configuration consumed by the tokenizer test mixin.
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seqaseq = False

    def setUp(self):
        """Write a tiny BPE vocab/merges pair into the mixin's tmp directory."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        """Input/expected-output pair used by the mixin's round-trip tests."""
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        """Tokenization and token→id conversion on the tiny fixture vocab."""
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        """Padding must raise ValueError: this tokenizer defines no pad token."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = "This is a simple input"
                sa = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                pa = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, sa, max_length=max_length, padding="max_length",
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, pa, max_length=max_length, padding="max_length",
                )

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token, so the mixin's default test is skipped
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class __UpperCAmelCase (_UpperCAmelCase ):
    # Placeholder subclass: re-runs the inherited tokenizer tests only when the
    # optional ftfy and spacy dependencies are installed.
    pass
| 125 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
# Fix over the degraded original: all five module-level constants had been
# collapsed onto the single name `__UpperCamelCase` (each assignment shadowing
# the previous one), while the tokenizer class below references the
# conventional names restored here.
logger = logging.get_logger(__name__)

# On-disk file names used when serializing the tokenizer.
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}

# Hub URLs of the pretrained vocab/tokenizer files, keyed by checkpoint name.
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''squeezebert/squeezebert-uncased''': (
            '''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'''
        ),
        '''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''',
        '''squeezebert/squeezebert-mnli-headless''': (
            '''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'''
        ),
    },
    '''tokenizer_file''': {
        '''squeezebert/squeezebert-uncased''': (
            '''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'''
        ),
        '''squeezebert/squeezebert-mnli''': (
            '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'''
        ),
        '''squeezebert/squeezebert-mnli-headless''': (
            '''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'''
        ),
    },
}

# Maximum sequence length supported by each checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''squeezebert/squeezebert-uncased''': 5_1_2,
    '''squeezebert/squeezebert-mnli''': 5_1_2,
    '''squeezebert/squeezebert-mnli-headless''': 5_1_2,
}

# Per-checkpoint tokenizer init overrides.
PRETRAINED_INIT_CONFIGURATION = {
    '''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True},
    '''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True},
    '''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True},
}
class SCREAMING_SNAKE_CASE ( a_ ):
    """Fast SqueezeBERT tokenizer (backed by HuggingFace *tokenizers*), WordPiece-based.

    Fixes over the degraded original: every ``__init__`` parameter shared the
    single name ``lowercase_`` (a SyntaxError), the body referenced unbound
    names, the three public methods all shared one (mutually shadowing) name
    breaking the ``PreTrainedTokenizerFast`` contract, and the class attributes
    the base class reads had been collapsed onto one name.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Re-configure the backend normalizer if the requested casing options
        # differ from what the serialized tokenizer was saved with.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('''lowercase''', do_lower_case) != do_lower_case
            or normalizer_state.get('''strip_accents''', strip_accents) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('''type'''))
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        """[CLS] A [SEP] (and optionally: B [SEP]) id sequence for model input."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_b=None):
        """Segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Persist the backend tokenizer model files; returns the written paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 106 |
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
__UpperCamelCase : Tuple = TypeVar('''T''')
class SCREAMING_SNAKE_CASE ( Generic[T] ):
"""simple docstring"""
lowercase__ = 42 # Cache store of keys
lowercase__ = 42 # References of the keys in cache
lowercase__ = 10 # Maximum capacity of cache
def __init__( self : Dict ,lowercase_ : int ):
lowerCAmelCase__ : str = deque()
lowerCAmelCase__ : Any = set()
if not n:
lowerCAmelCase__ : Optional[Any] = sys.maxsize
elif n < 0:
raise ValueError('''n should be an integer greater than 0.''' )
else:
lowerCAmelCase__ : int = n
def __lowerCAmelCase ( self : str ,lowercase_ : T ):
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
lowerCAmelCase__ : Any = self.dq_store.pop()
self.key_reference.remove(lowercase_ )
else:
self.dq_store.remove(lowercase_ )
self.dq_store.appendleft(lowercase_ )
self.key_reference.add(lowercase_ )
def __lowerCAmelCase ( self : int ):
for k in self.dq_store:
print(lowercase_ )
def __repr__( self : Tuple ):
return F'LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCamelCase : LRUCache[str | int] = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 106 | 1 |
A_ :Any = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 361 |
import math
import tensorflow as tf
from packaging import version
def A(a_):
    """Exact (erf-based) GELU activation: x * Phi(x).

    Fix over the degraded original: both locals were assigned to one collapsed
    name while the expression referenced the unbound ``x`` and ``cdf``.
    """
    x = tf.convert_to_tensor(a_)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf
def A(a_):
    """Tanh-approximated GELU ("gelu_new"), as used in the GPT-2 codebase.

    Fix over the degraded original: ``x``, ``pi``, ``coeff`` and ``cdf`` were
    unbound — every assignment went to one collapsed name.
    """
    x = tf.convert_to_tensor(a_)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044_715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf
def A(a_):
    """Mish activation: x * tanh(softplus(x)).

    Fix over the degraded original: ``x`` was unbound (assigned to a collapsed
    name) and softplus was applied to the raw argument instead of the tensor.
    """
    x = tf.convert_to_tensor(a_)
    return x * tf.tanh(tf.math.softplus(x))
def A(a_):
    """Fast tanh approximation of GELU ("gelu_fast").

    Fix over the degraded original: the two cast coefficients were bound to one
    collapsed name while the return expression referenced the unbound
    ``coeffa`` twice — the two distinct constants are restored here.
    """
    x = tf.convert_to_tensor(a_)
    coeff_a = tf.cast(0.044_715, x.dtype)
    coeff_b = tf.cast(0.7_978_845_608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff_b * (1.0 + coeff_a * x * x)))
def A(a_):
    """Quick GELU approximation: x * sigmoid(1.702 * x).

    Fix over the degraded original: ``x`` and ``coeff`` were unbound (both
    assignments went to one collapsed name).
    """
    x = tf.convert_to_tensor(a_)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)
def A ( a_ ) -> Tuple:
    # GELU clipped to [-10, 10] ("gelu_10"), for quantization-friendly ranges.
    # NOTE(review): `_gelu` is not defined in this file (the erf-based gelu
    # above was itself defined as `A`), so this call cannot resolve as written.
    return tf.clip_by_value(_gelu(a_ ) ,-10 ,10 )
def A(a_, axis=-1):
    """Gated Linear Unit: split the input in two along ``axis``; gate with sigmoid.

    Fixes over the degraded original: both parameters shared the name ``a_``
    (a SyntaxError), the split results were bound to one collapsed name, and
    the sigmoid gate was applied to the raw input instead of the second half.
    """
    first_half, gate = tf.split(a_, 2, axis=axis)
    return first_half * tf.math.sigmoid(gate)
# NOTE(review): every activation above was defined under the single name `A`,
# and the module aliases below are all bound to `A_`, so the names referenced
# here (`approximate_gelu_wrap`, `_gelu`, `_gelu_new`, `gelu`, `gelu_aa`,
# `gelu_fast`, `gelu_new`, `glu`, `mish`, `quick_gelu`) do not resolve in this
# file — the original per-function names need restoring before this table works.
if version.parse(tf.version.VERSION) >= version.parse('''2.4'''):
    def A ( a_ ) -> Tuple:
        # NOTE(review): `approximate=a_` passes the input tensor where a boolean
        # (presumably True) is expected — confirm against upstream.
        return tf.keras.activations.gelu(a_ ,approximate=a_ )
    A_ :int = tf.keras.activations.gelu
    A_ :Any = approximate_gelu_wrap
else:
    A_ :str = _gelu
    A_ :Dict = _gelu_new
# Name → activation callable mapping used by `get_tf_activation` below.
A_ :str = {
    '''gelu''': gelu,
    '''gelu_10''': gelu_aa,
    '''gelu_fast''': gelu_fast,
    '''gelu_new''': gelu_new,
    '''glu''': glu,
    '''mish''': mish,
    '''quick_gelu''': quick_gelu,
    '''relu''': tf.keras.activations.relu,
    '''sigmoid''': tf.keras.activations.sigmoid,
    '''silu''': tf.keras.activations.swish,
    '''swish''': tf.keras.activations.swish,
    '''tanh''': tf.keras.activations.tanh,
}
def A(a_):
    """Look up a TF activation function by name in the ACTaFN mapping.

    Raises KeyError for unknown names.  Fix over the degraded original: the
    body referenced the unbound ``activation_string`` instead of the parameter.
    """
    if a_ in ACTaFN:
        return ACTaFN[a_]
    else:
        raise KeyError(F'function {a_} not found in ACT2FN mapping {list(ACTaFN.keys() )}')
| 245 | 0 |
# Undirected demo graph (adjacency lists) used by the __main__ block below.
# Fix over the degraded original: the dict was bound to `lowerCAmelCase__`
# while the demo code references it as `demo_graph`.
demo_graph = {
    '''A''': ['''B''', '''C''', '''E'''],
    '''B''': ['''A''', '''D''', '''E'''],
    '''C''': ['''A''', '''F''', '''G'''],
    '''D''': ['''B'''],
    '''E''': ['''A''', '''B''', '''D'''],
    '''F''': ['''C'''],
    '''G''': ['''C'''],
}
def __lowerCamelCase(graph, start, goal):
    """Breadth-first search: return one shortest path from ``start`` to ``goal``.

    Returns ``[]`` when no path exists.  Fixes over the degraded original: all
    three parameters shared one name (a SyntaxError) and every local was bound
    to a collapsed name while the uses referenced ``queue``/``path``/``node``/
    ``neighbours``/``new_path``.
    """
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def __lowerCamelCase(graph, start, target):
    """Breadth-first search: shortest hop count from ``start`` to ``target``.

    Returns -1 for invalid inputs or unreachable targets, 0 when start equals
    target.  Fixes over the degraded original: all three parameters shared one
    name (a SyntaxError) and the locals ``queue``/``visited``/``dist``/``node``
    were never bound.
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
| 130 |
"""simple docstring"""
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def _snake_case(_snake_case: str) -> str:
    """Return the canonical signature of a word: its characters in sorted order."""
    letters = sorted(_snake_case)
    return "".join(letters)
def _snake_case ( _snake_case : str ) -> list[str]:
    # Look up every word sharing this word's sorted-letter signature.
    # NOTE(review): `signature` and `word_by_signature` are not defined under
    # these names in this file (the helper above is also called `_snake_case`
    # and shadows this def; the defaultdict below is bound to `a`), so this
    # cannot resolve as written.
    '''simple docstring'''
    return word_by_signature[signature(_snake_case )]
# Load the word list shipped next to this script, lower-cased and de-duplicated,
# then group the words by their sorted-letter signature.
# NOTE(review): every result below is bound to `a` (each assignment shadowing
# the previous), so `data`, `word_list`, `word_by_signature` and `signature`
# referenced here are unresolved as written.
a = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''')
a = sorted({word.strip().lower() for word in data.splitlines()})
a = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
a = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('''anagrams.txt''', '''w''') as file:
file.write('''all_anagrams = \n ''')
file.write(pprint.pformat(all_anagrams))
| 315 | 0 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
# Fix over the degraded original: all three tuning constants were collapsed
# onto the single name `__A`, while the functions below reference the
# conventional names restored here.

# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 2_00
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def __UpperCamelCase(_A, main_target):
    """Fitness: count positions where candidate ``_A`` matches ``main_target``.

    Returns ``(candidate, score)``.  Fixes over the degraded original: both
    parameters shared the name ``_A`` (a SyntaxError) and the body referenced
    the unbound ``main_target`` and returned an unbound ``score``.
    """
    score = len([g for position, g in enumerate(_A) if g == main_target[position]])
    return (_A, float(score))
def __UpperCamelCase(parent_a, parent_b):
    """Single-point crossover: swap the parents' tails at a random slice index.

    Fixes over the degraded original: both parameters shared one name (a
    SyntaxError), so both children were built from the same parent and the
    slice index/children names were unbound.
    """
    random_slice = random.randint(0, len(parent_a) - 1)
    child_a = parent_a[:random_slice] + parent_b[random_slice:]
    child_b = parent_b[:random_slice] + parent_a[random_slice:]
    return (child_a, child_b)
def __UpperCamelCase(_A, genes, mutation_probability=0.4):
    """With probability ``mutation_probability``, replace one random gene of ``_A``.

    The default mirrors the module's MUTATION_PROBABILITY constant; exposing it
    as a defaulted parameter is backward-compatible and makes the helper usable
    standalone.  Fixes over the degraded original: both parameters shared the
    name ``_A`` (a SyntaxError) and the mutated character was computed but
    never written back into the child.
    """
    child = list(_A)
    if random.uniform(0, 1) < mutation_probability:
        child[random.randint(0, len(_A)) - 1] = random.choice(genes)
    return "".join(child)
def __UpperCamelCase ( _A : tuple[str, float] , _A : list[tuple[str, float]] , _A : list[str] , ) ->list[str]:
    # Breed children from a scored parent: pick mates from the scored
    # population, cross them over and mutate the offspring.
    # NOTE(review): all three parameters share the name `_A` (a SyntaxError),
    # and the body references `parent_a`, `child_n`, `population_score`, `pop`,
    # `crossover` and `mutate`, none of which resolve in this file as written.
    '''simple docstring'''
    _SCREAMING_SNAKE_CASE = []
    # Generate more children proportionally to the fitness score.
    _SCREAMING_SNAKE_CASE = int(parent_a[1] * 100 ) + 1
    _SCREAMING_SNAKE_CASE = 10 if child_n >= 10 else child_n
    for _ in range(_A ):
        _SCREAMING_SNAKE_CASE = population_score[random.randint(0 , _A )][0]
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = crossover(parent_a[0] , _A )
        # Append new string to the population list.
        pop.append(mutate(_A , _A ) )
        pop.append(mutate(_A , _A ) )
    return pop
def __UpperCamelCase ( _A : str , _A : list[str] , _A : bool = True ) ->tuple[int, int, str]:
    # Evolution driver: breed/select generations until the target string is
    # reproduced; returns (generation, total_population, best_string).
    # NOTE(review): all three parameters share the name `_A` (a SyntaxError),
    # and the body references `target`, `genes`, `debug`, `evaluate`, `select`,
    # `N_POPULATION`, `N_SELECTED` and several result names that are unbound in
    # this file as written — every assignment was collapsed onto one name.
    '''simple docstring'''
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        lowerCamelCase_ =f'{N_POPULATION} must be bigger than {N_SELECTED}'
        raise ValueError(_A )
    # Verify that the target contains no genes besides the ones inside genes variable.
    lowerCamelCase_ =sorted({c for c in target if c not in genes} )
    if not_in_genes_list:
        lowerCamelCase_ =f'{not_in_genes_list} is not in genes list, evolution cannot converge'
        raise ValueError(_A )
    # Generate random starting population.
    lowerCamelCase_ =[]
    for _ in range(_A ):
        population.append("""""".join([random.choice(_A ) for i in range(len(_A ) )] ) )
    # Just some logs to know what the algorithms is doing.
    lowerCamelCase_ , lowerCamelCase_ =0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(_A )
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #    max_workers=NUM_WORKERS) as executor:
        #    futures = {executor.submit(evaluate, item) for item in population}
        #    concurrent.futures.wait(futures)
        #    population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        lowerCamelCase_ =[evaluate(_A , _A ) for item in population]
        # Check if there is a matching evolution.
        lowerCamelCase_ =sorted(_A , key=lambda _A : x[1] , reverse=_A )
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f'\nGeneration: {generation}'
                f'\nTotal Population:{total_population}'
                f'\nBest score: {population_score[0][1]}'
                f'\nBest string: {population_score[0][0]}' )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        lowerCamelCase_ =population[: int(N_POPULATION / 3 )]
        population.clear()
        population.extend(_A )
        # Normalize population score to be between 0 and 1.
        lowerCamelCase_ =[
            (item, score / len(_A )) for item, score in population_score
        ]
        # This is selection
        for i in range(_A ):
            population.extend(select(population_score[int(_A )] , _A , _A ) )
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(_A ) > N_POPULATION:
                break
if __name__ == "__main__":
__A : Tuple = (
'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
)
__A : int = list(
' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
)
__A, __A, __A : List[Any] = basic(target_str, genes_list)
print(
F"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
| 49 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
# Module-level logger for this TFRecord preparation script.
__A : List[str] = logging.getLogger(__name__)
def __UpperCamelCase():
    """Parse command-line options for preparing TFRecord shards.

    Fixes over the degraded original: the parser and parsed namespace were
    assigned to a collapsed local name while ``parser``/``args`` were
    referenced unbound, and every ``type=`` / the ``--limit`` default had been
    replaced by the unbound name ``_A`` (restored to str/int/None).
    """
    parser = argparse.ArgumentParser(
        description="""Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.""")
    parser.add_argument(
        """--dataset_name""", type=str, default="""wikitext""",
        help="""Name of the training. Explore datasets at: hf.co/datasets.""")
    parser.add_argument(
        """--dataset_config""", type=str, default="""wikitext-103-raw-v1""",
        help="""Configuration name of the dataset.""")
    parser.add_argument(
        """--tokenizer_name_or_path""", type=str, default="""sayakpaul/unigram-tokenizer-wikitext""",
        help="""Tokenizer identifier. Can be a local filepath or a Hub identifier.""")
    parser.add_argument(
        """--shard_size""", type=int, default=1000,
        help="""Number of entries to go in a single shard.""")
    parser.add_argument("""--split""", type=str, default="""train""", choices=["""train""", """test""", """validation"""])
    parser.add_argument(
        """--limit""", default=None, type=int,
        help="""Limit the number of shards (used for debugging).""")
    parser.add_argument(
        """--max_length""", type=int, default=512,
        help="""Maximum sequence length. For training on TPUs, it helps to have a maximum"""
        """ sequence length that is a multiple of 8.""")
    parser.add_argument(
        """--output_dir""", default="""tf-tpu""", type=str,
        help="""Output directory where the TFRecord shards will be saved. If the"""
        """ path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"""
        """ shards will be directly saved to a Google Cloud Storage bucket.""")
    args = parser.parse_args()
    return args
def __UpperCamelCase(_A):
    """Build a datasets.map-compatible closure that tokenizes the "text" column.

    Fix over the degraded original: the inner function's parameter was renamed
    away while its body referenced the unbound ``examples``, and the closure
    called the unbound ``tokenizer`` instead of the outer parameter.
    """
    def fn(examples):
        return _A(examples["""text"""])
    return fn
def __UpperCamelCase(_A):
    """Serialize tokenized batches into TF ``Example`` byte strings, one per row.

    Expects ``_A`` to be a mapping with parallel "input_ids" and
    "attention_mask" integer sequences.  Fixes over the degraded original:
    every local was bound to a collapsed name while ``tokenized_data``/
    ``features``/``example``/``serialized`` were referenced unbound, and the
    ``int64_list``/``Int64List`` identifiers had their digits stripped.
    """
    records = []
    for i in range(len(_A["""input_ids"""])):
        feature = {
            """input_ids""": tf.train.Feature(int64_list=tf.train.Int64List(value=_A["""input_ids"""][i])),
            """attention_mask""": tf.train.Feature(
                int64_list=tf.train.Int64List(value=_A["""attention_mask"""][i])),
        }
        features = tf.train.Features(feature=feature)
        example = tf.train.Example(features=features)
        serialized = example.SerializeToString()
        records.append(serialized)
    return records
def __UpperCamelCase ( _A : Any ) ->Dict:
    """Prepare TFRecord shards from a text dataset split.

    Pipeline: load ``args.split`` of the dataset, optionally truncate to
    ``args.limit`` rows, tokenize the ``"text"`` column, regroup tokens into
    fixed ``args.max_length`` chunks, then write ``args.shard_size``-sized
    TFRecord shards and record the total example count.

    NOTE(review): this module was machine-mangled — every local assignment
    target was collapsed to ``lowerCamelCase_`` while later reads still use
    the intended names (``dataset``, ``tokenizer``, ``split_dir``,
    ``shard_count``, ...), and ``_A`` stands in for several different values.
    The code is kept byte-identical here; the original names must be restored
    before it can run.
    """
    lowerCamelCase_ =datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
    if args.limit is not None:
        # Optionally restrict the split to its first ``args.limit`` rows.
        lowerCamelCase_ =min(len(_A ) , args.limit )
        lowerCamelCase_ =dataset.select(range(_A ) )
        print(f'Limiting the dataset to {args.limit} entries.' )
    lowerCamelCase_ =AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        lowerCamelCase_ =os.path.join(args.output_dir , args.split )
        if not os.path.exists(_A ):
            os.makedirs(_A )
    else:
        # ``gs://`` paths are handled directly by the TFRecordWriter below.
        lowerCamelCase_ =os.path.join(args.output_dir , args.split )
    # Tokenize the whole dataset at once.
    lowerCamelCase_ =tokenize_function(_A )
    lowerCamelCase_ =dataset.map(_A , batched=_A , num_proc=4 , remove_columns=["""text"""] )
    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(_A : Any ):
        # Concatenate all texts.
        lowerCamelCase_ ={k: sum(examples[k] , [] ) for k in examples.keys()}
        lowerCamelCase_ =len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        lowerCamelCase_ =(total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        lowerCamelCase_ ={
            k: [t[i : i + args.max_length] for i in range(0 , _A , args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result

    lowerCamelCase_ =dataset_tokenized.map(_A , batched=_A , batch_size=1000 , num_proc=4 )
    # Running totals across shards (shard index and number of records written).
    lowerCamelCase_ =0
    lowerCamelCase_ =0
    for shard in range(0 , len(_A ) , args.shard_size ):
        lowerCamelCase_ =grouped_dataset[shard : shard + args.shard_size]
        # Record count also goes into the shard's file name.
        lowerCamelCase_ =len(dataset_snapshot["""input_ids"""] )
        lowerCamelCase_ =os.path.join(_A , f'dataset-{shard_count}-{records_containing}.tfrecord' )
        lowerCamelCase_ =get_serialized_examples(_A )
        with tf.io.TFRecordWriter(_A ) as out_file:
            for i in range(len(_A ) ):
                lowerCamelCase_ =serialized_examples[i]
                out_file.write(_A )
            print("""Wrote file {} containing {} records""".format(_A , _A ) )
        shard_count += 1
        total_records += records_containing
    # Persist the total record count for this split next to the shards.
    with open(f'split-{args.split}-records-count.txt' , """w""" ) as f:
        print(f'Total {args.split} records: {total_records}' , file=_A )
if __name__ == "__main__":
    # Parse CLI flags, then run the TFRecord preparation pipeline.
    __A : Dict = parse_args()
    # Bug fix: ``main`` was called with the undefined name ``args``; pass the
    # parsed namespace that was just bound to ``__A``.
    # NOTE(review): ``parse_args``/``main`` themselves were renamed by the
    # obfuscation (both defs are ``__UpperCamelCase``) — confirm against the
    # original module.
    main(__A)
| 49 | 1 |
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''' , [None, '''v2'''] )
def a__ ( repo_id , path , revision ) -> List[str]:
    """Check that ``hf_hub_url`` builds the expected dataset resolve URL.

    Bug fix: the signature previously repeated one parameter name three times
    (a SyntaxError) and the body referenced undefined names. pytest's
    ``parametrize`` binds by name, so the arguments must be called
    ``repo_id``/``path``/``revision``; the path component is percent-encoded
    with ``urllib.parse.quote``.
    """
    url = hf_hub_url(repo_id=repo_id , path=path , revision=revision )
    assert url == F"""https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path )}"""
| 181 |
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def a__ ( repo_id , path , revision ) -> List[str]:
    """Check that ``hf_hub_url`` builds the expected dataset resolve URL.

    Bug fix: the signature previously repeated one parameter name three times
    (a SyntaxError) and the body referenced undefined names. pytest's
    ``parametrize`` binds by name, so the arguments must be called
    ``repo_id``/``path``/``revision``; the path component is percent-encoded
    with ``urllib.parse.quote``.
    """
    url = hf_hub_url(repo_id=repo_id , path=path , revision=revision )
    assert url == F"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path )}"
| 217 | 0 |
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
# Module-level logger (transformers' logging wrapper), named after this module.
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(
    A_, r'''
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
''', )
class UpperCAmelCase_ ( A_ ):
    """Fill-mask pipeline: predicts the token(s) behind a ``[MASK]`` placeholder.

    NOTE(review): this module was machine-mangled — all methods share the name
    ``__magic_name__`` (later defs clobber earlier ones) and local assignment
    targets were collapsed to ``A__`` while later reads still use the intended
    names (``masked_index``, ``model_inputs``, ``probs``, ...). Code is kept
    byte-identical; only comments/docstrings were changed.
    """
    def __magic_name__ ( self : Tuple , snake_case_ : GenericTensor ) -> np.ndarray:
        '''Return the position(s) of the mask token inside ``input_ids`` (tf or pt).'''
        if self.framework == "tf":
            A__ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
        elif self.framework == "pt":
            A__ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=snake_case_ )
        else:
            raise ValueError("Unsupported framework" )
        return masked_index
    def __magic_name__ ( self : Optional[int] , snake_case_ : GenericTensor ) -> np.ndarray:
        '''Raise a PipelineException when the input contains no mask token.'''
        A__ = self.get_masked_index(snake_case_ )
        A__ = np.prod(masked_index.shape )
        if numel < 1:
            raise PipelineException(
                "fill-mask" , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
    def __magic_name__ ( self : List[Any] , snake_case_ : GenericTensor ) -> Dict:
        '''Validate every sample in a (possibly batched) model input.'''
        if isinstance(snake_case_ , snake_case_ ):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0] )
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(snake_case_ )
    def __magic_name__ ( self : Any , snake_case_ : Tuple , snake_case_ : Union[str, Any]=None , **snake_case_ : Dict ) -> Dict[str, GenericTensor]:
        '''Tokenize the raw input and verify it carries a mask token.'''
        if return_tensors is None:
            A__ = self.framework
        A__ = self.tokenizer(snake_case_ , return_tensors=snake_case_ )
        self.ensure_exactly_one_mask_token(snake_case_ )
        return model_inputs
    def __magic_name__ ( self : Optional[Any] , snake_case_ : Dict ) -> Optional[Any]:
        '''Run the model forward pass, keeping input_ids alongside the logits.'''
        A__ = self.model(**snake_case_ )
        A__ = model_inputs["input_ids"]
        return model_outputs
    def __magic_name__ ( self : int , snake_case_ : List[Any] , snake_case_ : Dict=5 , snake_case_ : Any=None ) -> str:
        '''Turn logits into the top-k candidate tokens per masked position.'''
        # Cap top_k to the number of restricted targets, if any.
        if target_ids is not None and target_ids.shape[0] < top_k:
            A__ = target_ids.shape[0]
        A__ = model_outputs["input_ids"][0]
        A__ = model_outputs["logits"]
        if self.framework == "tf":
            A__ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
            A__ = outputs.numpy()
            A__ = outputs[0, masked_index, :]
            A__ = stable_softmax(snake_case_ , axis=-1 )
            if target_ids is not None:
                A__ = tf.gather_nd(tf.squeeze(snake_case_ , 0 ) , target_ids.reshape(-1 , 1 ) )
                A__ = tf.expand_dims(snake_case_ , 0 )
            A__ = tf.math.top_k(snake_case_ , k=snake_case_ )
            A__, A__ = topk.values.numpy(), topk.indices.numpy()
        else:
            A__ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=snake_case_ ).squeeze(-1 )
            # Fill mask pipeline supports only one ${mask_token} per sample
            A__ = outputs[0, masked_index, :]
            A__ = logits.softmax(dim=-1 )
            if target_ids is not None:
                A__ = probs[..., target_ids]
            A__, A__ = probs.topk(snake_case_ )
        A__ = []
        A__ = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
            A__ = []
            for v, p in zip(_values , _predictions ):
                # Copy is important since we're going to modify this array in place
                A__ = input_ids.numpy().copy()
                if target_ids is not None:
                    A__ = target_ids[p].tolist()
                A__ = p
                # Filter padding out:
                A__ = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                A__ = self.tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ )
                A__ = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence}
                row.append(snake_case_ )
            result.append(snake_case_ )
        if single_mask:
            return result[0]
        return result
    def __magic_name__ ( self : Tuple , snake_case_ : List[Any] , snake_case_ : Optional[Any]=None ) -> int:
        '''Map user-provided target words to vocabulary ids (tokenizing fallbacks).'''
        if isinstance(snake_case_ , snake_case_ ):
            A__ = [targets]
        try:
            A__ = self.tokenizer.get_vocab()
        except Exception:
            # Tokenizers without an accessible vocab fall back to tokenization only.
            A__ = {}
        A__ = []
        for target in targets:
            A__ = vocab.get(snake_case_ , snake_case_ )
            if id_ is None:
                A__ = self.tokenizer(
                    snake_case_ , add_special_tokens=snake_case_ , return_attention_mask=snake_case_ , return_token_type_ids=snake_case_ , max_length=1 , truncation=snake_case_ , )["input_ids"]
                if len(snake_case_ ) == 0:
                    logger.warning(
                        F"""The specified target token `{target}` does not exist in the model vocabulary. """
                        "We cannot replace it with anything meaningful, ignoring it" )
                    continue
                A__ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    F"""The specified target token `{target}` does not exist in the model vocabulary. """
                    F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
            target_ids.append(id_ )
        A__ = list(set(snake_case_ ) )
        if len(snake_case_ ) == 0:
            raise ValueError("At least one target must be provided when passed." )
        A__ = np.array(snake_case_ )
        return target_ids
    def __magic_name__ ( self : List[str] , snake_case_ : Tuple=None , snake_case_ : List[str]=None ) -> List[Any]:
        '''Split pipeline kwargs into (preprocess, forward, postprocess) params.'''
        A__ = {}
        if targets is not None:
            A__ = self.get_target_ids(snake_case_ , snake_case_ )
            A__ = target_ids
        if top_k is not None:
            A__ = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask" , self.model.base_model_prefix , "The tokenizer does not define a `mask_token`." )
        return {}, {}, postprocess_params
    def __call__( self : Dict , snake_case_ : Tuple , *snake_case_ : Dict , **snake_case_ : Optional[int] ) -> Optional[Any]:
        '''Run the pipeline; unwrap the singleton list for single-string inputs.'''
        A__ = super().__call__(snake_case_ , **snake_case_ )
        if isinstance(snake_case_ , snake_case_ ) and len(snake_case_ ) == 1:
            return outputs[0]
        return outputs
| 350 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase_ ( A_ ):
    """Processor wrapping a ViT image processor and a CLIP tokenizer.

    NOTE(review): machine-mangled source — the three class attributes all
    collapsed to ``lowercase__`` (originally the attributes list, the image
    processor class name and the tokenizer class names) and local assignment
    targets collapsed to ``A__`` while later reads use the intended names
    (``image_processor``, ``encoding``, ...). Code kept byte-identical; only
    comments/docstrings changed. In particular, in the ``text and images``
    branch the assignment target for ``encoding["pixel_values"]`` was lost —
    confirm against the original module.
    """
    lowercase__ = ['''image_processor''', '''tokenizer''']
    lowercase__ = '''ViTImageProcessor'''
    lowercase__ = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
    def __init__( self : Optional[Any] , snake_case_ : Union[str, Any]=None , snake_case_ : Dict=None , **snake_case_ : Optional[Any] ) -> List[str]:
        '''Accept (image_processor, tokenizer); honor the deprecated ``feature_extractor`` kwarg.'''
        A__ = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , snake_case_ , )
            A__ = kwargs.pop("feature_extractor" )
        A__ = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(snake_case_ , snake_case_ )
    def __call__( self : int , snake_case_ : Union[str, Any]=None , snake_case_ : int=None , snake_case_ : Dict=None , snake_case_ : int=None , **snake_case_ : List[str] ) -> Union[str, Any]:
        '''Encode text and/or images (or a visual prompt) into model inputs.'''
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images." )
        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." )
        if text is not None:
            A__ = self.tokenizer(snake_case_ , return_tensors=snake_case_ , **snake_case_ )
        if visual_prompt is not None:
            A__ = self.image_processor(snake_case_ , return_tensors=snake_case_ , **snake_case_ )
        if images is not None:
            A__ = self.image_processor(snake_case_ , return_tensors=snake_case_ , **snake_case_ )
        if visual_prompt is not None and images is not None:
            # Conditional (prompt) pixels travel alongside the query pixels.
            A__ = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            A__ = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            A__ = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**snake_case_ ) , tensor_type=snake_case_ )
    def __magic_name__ ( self : Tuple , *snake_case_ : List[str] , **snake_case_ : Optional[int] ) -> str:
        '''Forward to the tokenizer's ``batch_decode``.'''
        return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
    def __magic_name__ ( self : Optional[Any] , *snake_case_ : Any , **snake_case_ : List[Any] ) -> List[str]:
        '''Forward to the tokenizer's ``decode``.'''
        return self.tokenizer.decode(*snake_case_ , **snake_case_ )
    @property
    def __magic_name__ ( self : int ) -> Optional[int]:
        '''Deprecated alias for ``image_processor_class``.'''
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , snake_case_ , )
        return self.image_processor_class
    @property
    def __magic_name__ ( self : int ) -> int:
        '''Deprecated alias for ``image_processor``.'''
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , snake_case_ , )
        return self.image_processor
| 230 | 0 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class snake_case__:
"""simple docstring"""
@staticmethod
def snake_case ( *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : str ):
pass
def __lowerCamelCase ( lowerCamelCase__ ):
    """No-op stub: always returns None, ignoring its single argument."""

    return None
# Pinned invoice image from a fixed revision of the impira/docquery Space on
# the HuggingFace Hub; pinning the revision keeps the URL stable for tests.
lowerCAmelCase__ = '''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'''
@is_pipeline_test
@require_torch
@require_vision
class snake_case__(unittest.TestCase ):
    """Pipeline tests for document question answering.

    NOTE(review): machine-mangled source — every test method shares the name
    ``snake_case`` (later defs clobber earlier ones) and local assignment
    targets collapsed to ``lowercase__`` while later reads use the intended
    names (``dqa_pipeline``, ``image``, ``question``, ``word_boxes``, ...).
    Code kept byte-identical; only comments/docstrings were added.
    """
    lowercase_ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
    # Build a pipeline plus example payloads (URL image, loaded image, and
    # explicit word_boxes) for the shared pipeline test harness.
    @require_pytesseract
    @require_vision
    def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Dict ):
        lowercase__ : Union[str, Any] = pipeline(
            "document-question-answering" , model=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
        lowercase__ : Dict = INVOICE_URL
        lowercase__ : List[Any] = list(zip(*apply_tesseract(load_image(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , "" ) ) )
        lowercase__ : List[Any] = "What is the placebo?"
        lowercase__ : Dict = [
            {
                "image": load_image(SCREAMING_SNAKE_CASE ),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    # Generic shape check: every example yields top_k answer dicts.
    def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[int] ):
        lowercase__ : Optional[int] = dqa_pipeline(SCREAMING_SNAKE_CASE , top_k=2 )
        self.assertEqual(
            SCREAMING_SNAKE_CASE , [
                [
                    {"score": ANY(SCREAMING_SNAKE_CASE ), "answer": ANY(SCREAMING_SNAKE_CASE ), "start": ANY(SCREAMING_SNAKE_CASE ), "end": ANY(SCREAMING_SNAKE_CASE )},
                    {"score": ANY(SCREAMING_SNAKE_CASE ), "answer": ANY(SCREAMING_SNAKE_CASE ), "start": ANY(SCREAMING_SNAKE_CASE ), "end": ANY(SCREAMING_SNAKE_CASE )},
                ]
            ]
            * 3 , )
    # Tiny random LayoutLMv2 smoke test, including empty-text images.
    @require_torch
    @require_detectrona
    @require_pytesseract
    def snake_case ( self : Union[str, Any] ):
        lowercase__ : List[Any] = pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2" )
        lowercase__ : Optional[int] = INVOICE_URL
        lowercase__ : Dict = "How many cats are there?"
        lowercase__ : Optional[Any] = [
            {"score": 0.0_001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0_001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        lowercase__ : str = dqa_pipeline(image=SCREAMING_SNAKE_CASE , question=SCREAMING_SNAKE_CASE , top_k=2 )
        self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , SCREAMING_SNAKE_CASE )
        lowercase__ : Any = dqa_pipeline({"image": image, "question": question} , top_k=2 )
        self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , SCREAMING_SNAKE_CASE )
        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        lowercase__ : Any = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        lowercase__ : Optional[int] = dqa_pipeline(image=SCREAMING_SNAKE_CASE , question=SCREAMING_SNAKE_CASE , top_k=2 )
        self.assertEqual(SCREAMING_SNAKE_CASE , [] )
        # We can optionnally pass directly the words and bounding boxes
        lowercase__ : Tuple = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        lowercase__ : Union[str, Any] = []
        lowercase__ : List[Any] = []
        lowercase__ : List[str] = dqa_pipeline(image=SCREAMING_SNAKE_CASE , question=SCREAMING_SNAKE_CASE , words=SCREAMING_SNAKE_CASE , boxes=SCREAMING_SNAKE_CASE , top_k=2 )
        self.assertEqual(SCREAMING_SNAKE_CASE , [] )
    # Full LayoutLMv2 DocVQA checkpoint, single and batched inputs.
    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def snake_case ( self : Optional[Any] ):
        lowercase__ : Any = pipeline(
            "document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , )
        lowercase__ : Tuple = INVOICE_URL
        lowercase__ : List[Any] = "What is the invoice number?"
        lowercase__ : List[Any] = dqa_pipeline(image=SCREAMING_SNAKE_CASE , question=SCREAMING_SNAKE_CASE , top_k=2 )
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [
                {"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
            ] , )
        lowercase__ : Dict = dqa_pipeline({"image": image, "question": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [
                {"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
            ] , )
        lowercase__ : Union[str, Any] = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [
                [
                    {"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2 , )
    # Same checkpoint but with a short max_seq_len to exercise chunking.
    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def snake_case ( self : Union[str, Any] ):
        lowercase__ : str = pipeline(
            "document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , )
        lowercase__ : int = INVOICE_URL
        lowercase__ : List[str] = "What is the invoice number?"
        lowercase__ : Tuple = dqa_pipeline(image=SCREAMING_SNAKE_CASE , question=SCREAMING_SNAKE_CASE , top_k=2 )
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [
                {"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
            ] , )
        lowercase__ : Tuple = dqa_pipeline({"image": image, "question": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [
                {"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
            ] , )
        lowercase__ : List[str] = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [
                [
                    {"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2 , )
    # LayoutLM (v1) checkpoint; also checks image=None with explicit word_boxes.
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def snake_case ( self : Optional[int] ):
        lowercase__ : List[str] = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=SCREAMING_SNAKE_CASE )
        lowercase__ : str = pipeline(
            "document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=SCREAMING_SNAKE_CASE , revision="3dc6de3" , )
        lowercase__ : str = INVOICE_URL
        lowercase__ : Union[str, Any] = "What is the invoice number?"
        lowercase__ : List[Any] = dqa_pipeline(image=SCREAMING_SNAKE_CASE , question=SCREAMING_SNAKE_CASE , top_k=2 )
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [
                {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
            ] , )
        lowercase__ : Optional[Any] = dqa_pipeline({"image": image, "question": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [
                {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
            ] , )
        lowercase__ : Optional[int] = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [
                [
                    {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2 , )
        lowercase__ : Any = list(zip(*apply_tesseract(load_image(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , "" ) ) )
        # This model should also work if `image` is set to None
        lowercase__ : Any = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [
                {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
            ] , )
    # LayoutLM with truncated max_seq_len; batched and image=None variants.
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def snake_case ( self : Dict ):
        lowercase__ : Tuple = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=SCREAMING_SNAKE_CASE )
        lowercase__ : str = pipeline(
            "document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=SCREAMING_SNAKE_CASE , revision="3dc6de3" , max_seq_len=50 , )
        lowercase__ : Union[str, Any] = INVOICE_URL
        lowercase__ : int = "What is the invoice number?"
        lowercase__ : int = dqa_pipeline(image=SCREAMING_SNAKE_CASE , question=SCREAMING_SNAKE_CASE , top_k=2 )
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [
                {"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
            ] , )
        lowercase__ : Optional[int] = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [
                [
                    {"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2 , )
        lowercase__ : Optional[Any] = list(zip(*apply_tesseract(load_image(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , "" ) ) )
        # This model should also work if `image` is set to None
        lowercase__ : List[Any] = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [
                {"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
            ] , )
    # Donut (generative) checkpoint: answers come back without spans.
    @slow
    @require_torch
    def snake_case ( self : Any ):
        lowercase__ : Dict = pipeline(
            "document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , )
        lowercase__ : List[str] = INVOICE_URL
        lowercase__ : Dict = "What is the invoice number?"
        lowercase__ : List[Any] = dqa_pipeline(image=SCREAMING_SNAKE_CASE , question=SCREAMING_SNAKE_CASE , top_k=2 )
        self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE , decimals=4 ) , [{"answer": "us-001"}] )
    # Placeholder: the task has no TF implementation yet.
    @require_tf
    @unittest.skip("Document question answering not implemented in TF" )
    def snake_case ( self : List[Any] ):
        pass
| 130 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
# Module-level logger (transformers' logging wrapper), named after this module.
lowerCAmelCase__ = logging.get_logger(__name__)
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : int ):
warnings.warn(
"The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use ImageGPTImageProcessor instead." , SCREAMING_SNAKE_CASE , )
super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
| 130 | 1 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
# Module-level logger for the HANS data utilities.
snake_case = logging.getLogger(__name__)
@dataclass(frozen=True )
class SCREAMING_SNAKE_CASE :
    '''A single HANS train/dev example: a sentence pair with label and pair id.

    Bug fix: the obfuscation collapsed all five field names into one repeated
    identifier (leaving a one-field dataclass) and made ``frozen`` reference
    an undefined name. Field names are restored to the keywords callers use
    (``guid``/``text_a``/``text_b``/``label``/``pairID``) and ``frozen=True``.
    '''

    # Unique id, e.g. "train-12"; text_b/label/pairID are optional for test data.
    guid : str
    text_a : str
    text_b : Optional[str] = None
    label : Optional[str] = None
    pairID : Optional[str] = None
@dataclass(frozen=True )
class SCREAMING_SNAKE_CASE :
    '''Model-ready features for one HANS example.

    Bug fix: the obfuscation collapsed all five field names into one repeated
    identifier and made ``frozen`` reference an undefined name. Field names
    are restored to the ones readers/constructors use (``input_ids``,
    ``attention_mask``, ``token_type_ids``, ``label``, ``pairID``) and
    ``frozen=True``.
    '''

    # Tokenized inputs; label is the class index, pairID ties back to the raw pair.
    input_ids : List[int]
    attention_mask : Optional[List[int]] = None
    token_type_ids : Optional[List[int]] = None
    label : Optional[Union[int, float]] = None
    pairID : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
        '''Torch Dataset of HANS features, cached to disk per (split, tokenizer, length, task).

        NOTE(review): machine-mangled source — local assignment targets
        collapsed to ``SCREAMING_SNAKE_CASE`` while later reads use the
        intended names (``processor``, ``cached_features_file``,
        ``label_list``, ``examples``). Code kept byte-identical; only
        comments/docstrings changed.
        '''

        # Cached list of InputFeatures built in __init__.
        UpperCamelCase_ : List[InputFeatures]

        def __init__( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : PreTrainedTokenizer , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : bool = False , ):
            SCREAMING_SNAKE_CASE : Tuple = hans_processors[task]()
            SCREAMING_SNAKE_CASE : Dict = os.path.join(
                UpperCAmelCase_ , "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(UpperCAmelCase_ ) , UpperCAmelCase_ , ) , )
            SCREAMING_SNAKE_CASE : str = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = label_list[2], label_list[1]
            SCREAMING_SNAKE_CASE : List[Any] = label_list
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            SCREAMING_SNAKE_CASE : Tuple = cached_features_file + ".lock"
            with FileLock(UpperCAmelCase_ ):
                if os.path.exists(UpperCAmelCase_ ) and not overwrite_cache:
                    logger.info(f'''Loading features from cached file {cached_features_file}''' )
                    SCREAMING_SNAKE_CASE : int = torch.load(UpperCAmelCase_ )
                else:
                    logger.info(f'''Creating features from dataset file at {data_dir}''' )
                    SCREAMING_SNAKE_CASE : List[Any] = (
                        processor.get_dev_examples(UpperCAmelCase_ ) if evaluate else processor.get_train_examples(UpperCAmelCase_ )
                    )
                    logger.info("Training examples: %s" , len(UpperCAmelCase_ ) )
                    SCREAMING_SNAKE_CASE : Any = hans_convert_examples_to_features(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
                    logger.info("Saving features into cached file %s" , UpperCAmelCase_ )
                    torch.save(self.features , UpperCAmelCase_ )

        def __len__( self : Optional[Any] ):
            # Number of featurized examples.
            return len(self.features )

        def __getitem__( self : List[Any] , UpperCAmelCase_ : str ):
            # Index into the cached feature list.
            return self.features[i]

        def _A ( self : List[str] ):
            # Label names (possibly RoBERTa-reordered, see __init__).
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class SCREAMING_SNAKE_CASE :
        '''TensorFlow variant of the HANS dataset: wraps the features in a tf.data generator.

        NOTE(review): machine-mangled source — local assignment targets
        collapsed to ``SCREAMING_SNAKE_CASE`` while later reads use the
        intended names; ``tf.intaa`` presumably stands for ``tf.int64`` /
        ``tf.int32`` (confirm against the original module). Code kept
        byte-identical; only comments/docstrings changed.
        '''

        # Featurized examples backing the generator below.
        UpperCamelCase_ : List[InputFeatures]

        def __init__( self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : PreTrainedTokenizer , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] = 128 , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : bool = False , ):
            SCREAMING_SNAKE_CASE : Union[str, Any] = hans_processors[task]()
            SCREAMING_SNAKE_CASE : Tuple = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = label_list[2], label_list[1]
            SCREAMING_SNAKE_CASE : Dict = label_list
            SCREAMING_SNAKE_CASE : Any = processor.get_dev_examples(UpperCAmelCase_ ) if evaluate else processor.get_train_examples(UpperCAmelCase_ )
            SCREAMING_SNAKE_CASE : List[Any] = hans_convert_examples_to_features(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )

            def gen():
                # Yield (inputs, label) pairs for tf.data.Dataset.from_generator.
                for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ):
                    if ex_index % 1_0000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(UpperCAmelCase_ )) )
                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            SCREAMING_SNAKE_CASE : Optional[int] = tf.data.Dataset.from_generator(
                UpperCAmelCase_ , (
                    {
                        "example_id": tf.intaa,
                        "input_ids": tf.intaa,
                        "attention_mask": tf.intaa,
                        "token_type_ids": tf.intaa,
                    },
                    tf.intaa,
                ) , (
                    {
                        "example_id": tf.TensorShape([] ),
                        "input_ids": tf.TensorShape([None, None] ),
                        "attention_mask": tf.TensorShape([None, None] ),
                        "token_type_ids": tf.TensorShape([None, None] ),
                    },
                    tf.TensorShape([] ),
                ) , )

        def _A ( self : Tuple ):
            # Expose the underlying tf.data.Dataset.
            return self.dataset

        def __len__( self : Optional[int] ):
            return len(self.features )

        def __getitem__( self : List[Any] , UpperCAmelCase_ : Union[str, Any] ):
            return self.features[i]

        def _A ( self : List[str] ):
            # Label names (possibly RoBERTa-reordered, see __init__).
            return self.label_list
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
    """Processor for the HANS data set.

    The four methods were all previously named ``_A`` (each definition shadowed
    the previous one) and ``_create_examples`` built examples from undefined
    names; the names are restored to match the visible call sites
    (``processor.get_train_examples`` and ``self._create_examples``).
    """

    def get_train_examples( self : Any , UpperCAmelCase_ : Any ):
        """Build train examples from ``heuristics_train_set.txt`` in the given data dir."""
        return self._create_examples(self._read_tsv(os.path.join(UpperCAmelCase_ , "heuristics_train_set.txt" ) ) , "train" )

    def get_dev_examples( self : str , UpperCAmelCase_ : Optional[int] ):
        """Build dev examples from ``heuristics_evaluation_set.txt`` in the given data dir."""
        return self._create_examples(self._read_tsv(os.path.join(UpperCAmelCase_ , "heuristics_evaluation_set.txt" ) ) , "dev" )

    def get_labels( self : Any ):
        """Return the three NLI labels used by HANS."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples( self : Tuple , lines , set_type ):
        """Convert rows of a HANS tsv file into ``InputExample`` objects."""
        examples = []
        for i, line in enumerate(lines ):
            if i == 0:
                # first row is the header
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            # pair ids in the file may carry an "ex" prefix
            pairID = line[7][2:] if line[7].startswith("ex" ) else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid , text_a=text_a , text_b=text_b , label=label , pairID=pairID ) )
        return examples
def hans_convert_examples_to_features( examples , label_list , max_length , tokenizer ):
    """Convert ``InputExample``s into ``InputFeatures`` usable by a model.

    The four parameters were previously all named ``lowercase`` (a SyntaxError)
    and the body referenced names that were never bound (``label_map``,
    ``features``); names are restored from their use sites. The function name
    matches the visible caller ``hans_convert_examples_to_features(...)``.
    """
    label_map = {label: i for i, label in enumerate(label_list )}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples ) , desc="convert examples to features" ):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index) )
        inputs = tokenizer(
            example.text_a ,
            example.text_b ,
            add_special_tokens=True ,
            max_length=max_length ,
            padding="max_length" ,
            truncation=True ,
            return_overflowing_tokens=True ,
        )
        # labels missing from the map (possible for HANS) default to class 0
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID )
        features.append(InputFeatures(**inputs , label=label , pairID=pairID ) )
    # log the first few converted examples for debugging
    for i, example in enumerate(examples[:5] ):
        logger.info("*** Example ***" )
        logger.info(F'''guid: {example}''' )
        logger.info(F'''features: {features[i]}''' )
    return features
# Both dicts below were previously assigned to the same name ``snake_case``,
# so the num-labels mapping was lost by shadowing; distinct names restored.
hans_tasks_num_labels = {
    "hans": 3,
}
hans_processors = {
    "hans": HansProcessor,
}
| 319 |
def bubble_sort( list_data , length = 0 ):
    """Recursive bubble sort: sorts ``list_data`` in place and returns it.

    The original definition had two parameters both named ``lowercase``
    (a SyntaxError) and referenced unbound names; the recursive call to
    ``bubble_sort`` on the last line grounds the restored names.

    >>> bubble_sort([3, 1, 2])
    [1, 2, 3]
    """
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    # a full pass with no swap means the prefix is already sorted
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
if __name__ == "__main__":
    # Run the doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| 319 | 1 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
# The three module-level values below were previously all bound to
# ``lowerCamelCase__``, so each assignment shadowed the previous one; the
# names are restored from their use sites (``logger.info``,
# ``IMAGE_PROCESSOR_MAPPING_NAMES.items()``, ``IMAGE_PROCESSOR_MAPPING``).
logger = logging.get_logger(__name__)

# model_type -> image processor class name (duplicate "mobilevit" entry removed)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
    [
        ("align", "EfficientNetImageProcessor"),
        ("beit", "BeitImageProcessor"),
        ("bit", "BitImageProcessor"),
        ("blip", "BlipImageProcessor"),
        ("blip-2", "BlipImageProcessor"),
        ("bridgetower", "BridgeTowerImageProcessor"),
        ("chinese_clip", "ChineseCLIPImageProcessor"),
        ("clip", "CLIPImageProcessor"),
        ("clipseg", "ViTImageProcessor"),
        ("conditional_detr", "ConditionalDetrImageProcessor"),
        ("convnext", "ConvNextImageProcessor"),
        ("convnextv2", "ConvNextImageProcessor"),
        ("cvt", "ConvNextImageProcessor"),
        ("data2vec-vision", "BeitImageProcessor"),
        ("deformable_detr", "DeformableDetrImageProcessor"),
        ("deit", "DeiTImageProcessor"),
        ("deta", "DetaImageProcessor"),
        ("detr", "DetrImageProcessor"),
        ("dinat", "ViTImageProcessor"),
        ("donut-swin", "DonutImageProcessor"),
        ("dpt", "DPTImageProcessor"),
        ("efficientformer", "EfficientFormerImageProcessor"),
        ("efficientnet", "EfficientNetImageProcessor"),
        ("flava", "FlavaImageProcessor"),
        ("focalnet", "BitImageProcessor"),
        ("git", "CLIPImageProcessor"),
        ("glpn", "GLPNImageProcessor"),
        ("groupvit", "CLIPImageProcessor"),
        ("imagegpt", "ImageGPTImageProcessor"),
        ("instructblip", "BlipImageProcessor"),
        ("layoutlmv2", "LayoutLMv2ImageProcessor"),
        ("layoutlmv3", "LayoutLMv3ImageProcessor"),
        ("levit", "LevitImageProcessor"),
        ("mask2former", "Mask2FormerImageProcessor"),
        ("maskformer", "MaskFormerImageProcessor"),
        ("mgp-str", "ViTImageProcessor"),
        ("mobilenet_v1", "MobileNetV1ImageProcessor"),
        ("mobilenet_v2", "MobileNetV2ImageProcessor"),
        ("mobilevit", "MobileViTImageProcessor"),
        ("mobilevitv2", "MobileViTImageProcessor"),
        ("nat", "ViTImageProcessor"),
        ("oneformer", "OneFormerImageProcessor"),
        ("owlvit", "OwlViTImageProcessor"),
        ("perceiver", "PerceiverImageProcessor"),
        ("pix2struct", "Pix2StructImageProcessor"),
        ("poolformer", "PoolFormerImageProcessor"),
        ("regnet", "ConvNextImageProcessor"),
        ("resnet", "ConvNextImageProcessor"),
        ("sam", "SamImageProcessor"),
        ("segformer", "SegformerImageProcessor"),
        ("swiftformer", "ViTImageProcessor"),
        ("swin", "ViTImageProcessor"),
        ("swin2sr", "Swin2SRImageProcessor"),
        ("swinv2", "ViTImageProcessor"),
        ("table-transformer", "DetrImageProcessor"),
        ("timesformer", "VideoMAEImageProcessor"),
        ("tvlt", "TvltImageProcessor"),
        ("upernet", "SegformerImageProcessor"),
        ("van", "ConvNextImageProcessor"),
        ("videomae", "VideoMAEImageProcessor"),
        ("vilt", "ViltImageProcessor"),
        ("vit", "ViTImageProcessor"),
        ("vit_hybrid", "ViTHybridImageProcessor"),
        ("vit_mae", "ViTImageProcessor"),
        ("vit_msn", "ViTImageProcessor"),
        ("xclip", "CLIPImageProcessor"),
        ("yolos", "YolosImageProcessor"),
    ]
)

IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name( class_name ):
    """Resolve an image-processor class object from its class name.

    Looks the name up in ``IMAGE_PROCESSOR_MAPPING_NAMES``, then in manually
    registered extra content, then falls back to the main ``transformers``
    module (which exposes dummy objects when a dependency is missing).
    Returns ``None`` when nothing matches. The function name matches its
    visible call site in ``from_pretrained``.
    """
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name )
            module = importlib.import_module(F'''.{module_name}''' , 'transformers.models' )
            try:
                return getattr(module , class_name )
            except AttributeError:
                # the class may live under another model module; keep looking
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor , '__name__' , None ) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('transformers' )
    if hasattr(main_module , class_name ):
        return getattr(main_module , class_name )

    return None
def get_image_processor_config(
    pretrained_model_name_or_path ,
    cache_dir = None ,
    force_download = False ,
    resume_download = False ,
    proxies = None ,
    use_auth_token = None ,
    revision = None ,
    local_files_only = False ,
    **kwargs ,
):
    """Load the image-processor configuration dict for a model repo/path.

    The original signature repeated ``SCREAMING_SNAKE_CASE_`` for every
    parameter (a SyntaxError); the parameters are restored from the keyword
    arguments forwarded to ``get_file_from_repo``. Returns ``{}`` when no
    image-processor config file can be located.
    """
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path ,
        IMAGE_PROCESSOR_NAME ,
        cache_dir=cache_dir ,
        force_download=force_download ,
        resume_download=resume_download ,
        proxies=proxies ,
        use_auth_token=use_auth_token ,
        revision=revision ,
        local_files_only=local_files_only ,
    )
    if resolved_config_file is None:
        logger.info(
            'Could not locate the image processor configuration file, will try to use the model config instead.' )
        return {}

    with open(resolved_config_file , encoding='utf-8' ) as reader:
        return json.load(reader )
class A__ :
    """Factory class (``AutoImageProcessor``): instantiates the correct
    library image-processor class via :meth:`from_pretrained`.

    The two methods were previously both named ``_lowerCamelCase`` (the second
    shadowed the first) and the decorator argument / many locals were unbound;
    names are restored from their use sites.
    """

    def __init__( self : List[Any] ):
        # This class is a pure factory and must not be instantiated directly.
        raise EnvironmentError(
            'AutoImageProcessor is designed to be instantiated '
            'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES )
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """Instantiate an image processor from a model name or local path.

        Resolution order: the image-processor config itself, then a legacy
        feature-extractor config, then the model config, then the auto
        mapping; raises ``ValueError`` when nothing matches.
        """
        config = kwargs.pop('config' , None )
        trust_remote_code = kwargs.pop('trust_remote_code' , None )
        kwargs['_from_auto'] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path , **kwargs )
        image_processor_class = config_dict.get('image_processor_type' , None )
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get('auto_map' , {} ):
            image_processor_auto_map = config_dict['auto_map']['AutoImageProcessor']

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop('feature_extractor_type' , None )
            if feature_extractor_class is not None:
                logger.warning(
                    'Could not find image processor class in the image processor config or the model config. Loading'
                    ' based on pattern matching with the model\'s feature extractor configuration.' )
                image_processor_class = feature_extractor_class.replace('FeatureExtractor' , 'ImageProcessor' )
            if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ):
                feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']
                image_processor_auto_map = feature_extractor_auto_map.replace('FeatureExtractor' , 'ImageProcessor' )
                logger.warning(
                    'Could not find image processor auto map in the image processor config or the model config.'
                    ' Loading based on pattern matching with the model\'s feature extractor configuration.' )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config , PretrainedConfig ):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path , **kwargs )
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config , 'image_processor_type' , None )
            if hasattr(config , 'auto_map' ) and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map['AutoImageProcessor']

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class )

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config ) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code , pretrained_model_name_or_path , has_local_code , has_remote_code )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map , pretrained_model_name_or_path , **kwargs )
            _ = kwargs.pop('code_revision' , None )
            if os.path.isdir(pretrained_model_name_or_path ):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict , **kwargs )
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict , **kwargs )
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config ) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config )]
            return image_processor_class.from_dict(config_dict , **kwargs )

        raise ValueError(
            f'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
            f'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
            f'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )

    @staticmethod
    def register( config_class , image_processor_class ):
        """Register a new image-processor class for a given config class."""
        IMAGE_PROCESSOR_MAPPING.register(config_class , image_processor_class )
class A__ :
def __init__( self : Optional[Any] , a : list ):
'''simple docstring'''
lowerCAmelCase__ : Dict = set_counts
lowerCAmelCase__ : str = max(a )
lowerCAmelCase__ : Any = len(a )
lowerCAmelCase__ : List[str] = [1] * num_sets
lowerCAmelCase__ : Dict = list(range(a ) )
def _lowerCamelCase ( self : Dict , a : int , a : int ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.get_parent(a )
lowerCAmelCase__ : Tuple = self.get_parent(a )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
lowerCAmelCase__ : Tuple = 0
lowerCAmelCase__ : str = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
lowerCAmelCase__ : List[Any] = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
lowerCAmelCase__ : Optional[int] = 0
lowerCAmelCase__ : Tuple = src_parent
lowerCAmelCase__ : Optional[int] = self.set_counts[src_parent]
lowerCAmelCase__ : Optional[Any] = max(self.max_set , a )
return True
def _lowerCamelCase ( self : Any , a : int ):
'''simple docstring'''
if self.parents[disj_set] == disj_set:
return disj_set
lowerCAmelCase__ : Tuple = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set] | 212 | 1 |
'''simple docstring'''
import numpy
# List of input, output pairs
# List of input, output pairs. The five module-level values below were
# previously all assigned to ``snake_case_`` so only the last survived;
# names are restored from their use sites in the functions that follow.
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error( example_no , data_set="train" ):
    """Difference between hypothesis and actual output for one example.

    Name restored from the call site in ``summation_of_cost_derivative``; the
    original signature had two parameters with the same name (a SyntaxError).
    """
    return calculate_hypothesis_value(example_no , data_set ) - output(
        example_no , data_set )
def _hypothesis_value( data_input_tuple ):
    """Evaluate the linear hypothesis h(x) = p0 + p1*x1 + ... + pn*xn.

    Fixes an off-by-one: the loop must cover every non-bias parameter
    (``len(parameter_vector) - 1`` terms); the degraded version iterated over
    ``len(data_input_tuple) - 1`` and silently dropped the last feature.
    """
    hyp_val = 0
    for i in range(len(parameter_vector ) - 1 ):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    # bias term
    hyp_val += parameter_vector[0]
    return hyp_val
def output( example_no , data_set ):
    """Actual target value of `example_no` from the train or test set.

    Returns None for an unknown ``data_set`` name.
    """
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value( example_no , data_set ):
    """Hypothesis value for `example_no` from the train or test set.

    Returns None for an unknown ``data_set`` name.
    """
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0] )
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0] )
    return None
def summation_of_cost_derivative( index , end=m ):
    """Sum of cost-derivative terms over the first `end` training examples.

    ``index == -1`` corresponds to the bias parameter, which has no feature
    factor. The original signature repeated one parameter name (SyntaxError).
    """
    summation_value = 0
    for i in range(end ):
        if index == -1:
            summation_value += _error(i )
        else:
            summation_value += _error(i ) * train_data[i][0][index]
    return summation_value
def get_cost_derivative( index ):
    """Mean cost derivative for parameter `index` (-1 = bias term)."""
    cost_derivative_value = summation_of_cost_derivative(index , m ) / m
    return cost_derivative_value
def run_gradient_descent():
    """Batch gradient descent on ``parameter_vector`` until convergence.

    Iterates until consecutive parameter vectors agree within the absolute
    tolerance, then prints the iteration count.
    """
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000_002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0 , len(parameter_vector ) ):
            # i - 1 maps slot 0 to the bias derivative (index -1)
            cost_derivative = get_cost_derivative(i - 1 )
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector ,
            temp_parameter_vector ,
            atol=absolute_error_limit ,
            rtol=relative_error_limit ,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(('Number of iterations:', j) )
def test_gradient_descent():
    """Print actual vs. hypothesis outputs for every test example."""
    for i in range(len(test_data ) ):
        print(('Actual output value:', output(i , 'test' )) )
        print(('Hypothesis output:', calculate_hypothesis_value(i , 'test' )) )
if __name__ == "__main__":
    # Fit the linear hypothesis, then report predictions on the test set.
    run_gradient_descent()
    print('\nTesting gradient descent for a linear hypothesis function.\n')
    test_gradient_descent()
'''simple docstring'''
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class lowercase__ ( unittest.TestCase ):
    """Integration tests for BetterTransformer <-> Transformers round trips.

    The two test methods were previously both named ``UpperCamelCase_`` so the
    second definition shadowed the first and neither was discoverable by
    unittest; distinct ``test_*`` names are restored, and unbound
    ``lowerCamelCase__`` references are replaced by the locals they stand for.
    """

    def test_transform_and_reverse( self : str ):
        """Transform -> generate -> reverse -> save/reload keeps generations identical."""
        model_id = 'hf-internal-testing/tiny-random-t5'
        tokenizer = AutoTokenizer.from_pretrained(model_id )
        model = AutoModelForSeqaSeqLM.from_pretrained(model_id )
        inp = tokenizer('This is me' ,return_tensors='pt' )

        model = model.to_bettertransformer()
        self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
        output = model.generate(**inp )

        model = model.reverse_bettertransformer()
        self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname )
            model_reloaded = AutoModelForSeqaSeqLM.from_pretrained(tmpdirname )
            self.assertFalse(
                any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
            output_from_pretrained = model_reloaded.generate(**inp )
            self.assertTrue(torch.allclose(output ,output_from_pretrained ) )

    def test_error_save_pretrained( self : Any ):
        """save_pretrained must refuse a transformed model until it is reversed."""
        model_id = 'hf-internal-testing/tiny-random-t5'
        model = AutoModelForSeqaSeqLM.from_pretrained(model_id )
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            # saving a still-transformed model is expected to raise
            with self.assertRaises(ValueError ):
                model.save_pretrained(tmpdirname )
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname )
| 236 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaImgaImgPipelineFastTests ( PipelineTesterMixin , unittest.TestCase ):
    """Fast CPU tests for the Kandinsky 2.2 img2img pipeline.

    Previously every class attribute was named ``_SCREAMING_SNAKE_CASE`` and
    every method ``_snake_case``, so later definitions shadowed earlier ones
    and the undefined base ``_UpperCAmelCase`` broke the class; names are
    restored from the ``PipelineTesterMixin`` contract and the use sites
    below (the mixin is imported at the top of this file). The class is also
    renamed so it no longer collides with the integration-test class.
    """

    pipeline_class = KandinskyVaaImgaImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self ):
        return 32

    @property
    def time_input_dim(self ):
        return 32

    @property
    def block_out_channels_0(self ):
        return self.time_input_dim

    @property
    def time_embed_dim(self ):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self ):
        return 100

    @property
    def dummy_unet(self ):
        """Tiny deterministic UNet for fast tests."""
        torch.manual_seed(0 )

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNetaDConditionModel(**model_kwargs )
        return model

    @property
    def dummy_movq_kwargs(self ):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self ):
        """Tiny deterministic VQ model for fast tests."""
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model

    def get_dummy_components(self ):
        """Assemble the unet/scheduler/movq components of the pipeline."""
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler_kwargs = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**scheduler_kwargs )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self , device , seed=0 ):
        """Seeded dummy embeddings, init image and call kwargs for the pipeline."""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert("RGB" ).resize((256, 256) )

        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self ):
        """Pipeline output matches the recorded reference slice on CPU."""
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )

        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class KandinskyVaaImgaImgPipelineIntegrationTests ( unittest.TestCase ):
    """GPU integration test comparing the pipeline against a reference image.

    The two methods were previously both named ``_snake_case`` (so the
    ``tearDown`` hook was shadowed and never ran) and the class name collided
    with the fast-test class above; canonical names are restored.
    """

    def tearDown(self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self ):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy" )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )

        pipeline = KandinskyVaaImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )

        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        image_emb, zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="" , ).to_tuple()

        output = pipeline(
            image=init_image , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , )

        image = output.images[0]

        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
| 46 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class lowercase ( TaskTemplate ):
    """Task template mapping a dataset's text column to the canonical
    ``"text"`` feature for language modeling.

    The base class and field names were degraded (all four fields were named
    ``_SCREAMING_SNAKE_CASE``, which also breaks the dataclass, since fields
    require distinct annotated names); they are restored from the
    ``TaskTemplate`` import and the ``self.text_column`` use site.
    """

    # kept in asdict() even at its default so the template round-trips
    task: str = field(default="language-modeling" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema: ClassVar[Features] = Features({"text": Value("string" )} )
    label_schema: ClassVar[Features] = Features({} )
    text_column: str = "text"

    @property
    def column_mapping(self ) -> Dict[str, str]:
        # map the configured source column onto the schema's "text" feature
        return {self.text_column: "text"}
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset( dataset , expected_features ):
    """Shared assertions for a Dataset read from the standard parquet fixture.

    Name restored from its call sites below; the original signature repeated
    one parameter name (a SyntaxError).
    """
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_dataset_from_parquet_keep_in_memory( keep_in_memory , parquet_path , tmp_path ):
    """ParquetDatasetReader honors keep_in_memory for a single parquet file.

    Parameter names restored to the pytest fixture/parametrize names they
    must match (``keep_in_memory``, ``parquet_path``, ``tmp_path``).
    """
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_parquet_dataset(dataset , expected_features )
@pytest.mark.parametrize(
    "features" , [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ] , )
def test_dataset_from_parquet_features( features , parquet_path , tmp_path ):
    """Explicit ``features`` are applied when reading a parquet file."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path , features=features , cache_dir=cache_dir ).read()
    _check_parquet_dataset(dataset , expected_features )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_dataset_from_parquet_split( split , parquet_path , tmp_path ):
    """The requested split name is recorded on the dataset (default: train)."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path , cache_dir=cache_dir , split=split ).read()
    _check_parquet_dataset(dataset , expected_features )
    # parenthesized: the bare conditional expression made the assert vacuous
    # whenever `split` was falsy
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("path_type" , [str, list] )
def test_dataset_from_parquet_path_type( path_type , parquet_path , tmp_path ):
    """Both a single path and a list of paths are accepted."""
    if issubclass(path_type , str ):
        path = parquet_path
    elif issubclass(path_type , list ):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path , cache_dir=cache_dir ).read()
    _check_parquet_dataset(dataset , expected_features )
def _check_parquet_datasetdict( dataset_dict , expected_features , splits=("train",) ):
    """Shared assertions for a DatasetDict read from the parquet fixture."""
    assert isinstance(dataset_dict , DatasetDict )
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_parquet_datasetdict_reader_keep_in_memory( keep_in_memory , parquet_path , tmp_path ):
    """keep_in_memory is honored when reading a split->path mapping."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path} , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_parquet_datasetdict(dataset , expected_features )
@pytest.mark.parametrize(
    "features" , [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ] , )
def test_parquet_datasetdict_reader_features( features , parquet_path , tmp_path ):
    """Explicit ``features`` are applied when reading a split->path mapping."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path} , features=features , cache_dir=cache_dir ).read()
    _check_parquet_datasetdict(dataset , expected_features )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_parquet_datasetdict_reader_split( split , parquet_path , tmp_path ):
    """Each split key of the path mapping is preserved on the resulting datasets."""
    if split:
        path = {split: parquet_path}
    else:
        # no split requested: read both default splits
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path , cache_dir=cache_dir ).read()
    _check_parquet_datasetdict(dataset , expected_features , splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
def test_parquet_write( dataset , tmp_path ):
    """Writing a dataset produces a parquet file with the same arrow table."""
    writer = ParquetDatasetWriter(dataset , tmp_path / "foo.parquet" )
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet" )
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    """Ensure complex features (Image) survive a parquet write/read round trip."""
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features
    # Also verify the streaming code path preserves the features.
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    """Row-group size is only overridden for media (image/audio) features."""
    assert get_writer_batch_size(feature) == expected
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    """Builds tiny TimmBackbone configs and random inputs for the test suite.

    NOTE(review): this class is instantiated as ``TimmBackboneModelTester(self)``
    further down in this file, so it must carry that name.
    """

    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        # Default to the last stage when the caller does not pin the indices.
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        """Return a backbone config together with a random pixel batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        """Run a forward pass and check the last feature map's shape."""
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        # 14x14 is the expected spatial size for a 32px input here — see the
        # original shape assertion; confirm if the default backbone changes.
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        """Adapt (config, pixel_values) to the (config, inputs_dict) shape the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite tests for TimmBackbone, a thin wrapper around timm models.

    NOTE(review): the individual skip-test names below are reconstructions —
    unittest only requires them to be unique and ``test_``-prefixed.
    """

    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        # TimmBackbone has no text modality, so the config tester must not probe it.
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])
        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)

    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_no_missing_tied_weights(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_attention_outputs(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            # Forward pass only — verifies a fresh-weight backbone runs at all.
            result = model(**inputs_dict)
| 258 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for the Kandinsky img2img pipeline built from tiny dummy models."""

    pipeline_class = KandinskyImgaImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    # NOTE(review): attribute name reconstructed from the PipelineTesterMixin
    # contract; the original source only shows a bare ``False`` flag here.
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        """Assemble all tiny sub-models plus a DDIM scheduler into pipeline kwargs."""
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline inputs: embeddings, an init image and a generator."""
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        # MPS has no per-device Generator; fall back to the global seed there.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration test against the real kandinsky-2-1 checkpoints."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        # The prior produces the (positive, negative) image embeddings.
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
__lowerCamelCase = "\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
__lowerCamelCase = "\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n"
__lowerCamelCase = "\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n 'score' (float): The chrF (chrF++) score,\n 'char_order' (int): The character n-gram order,\n 'word_order' (int): The word n-gram order. 
If equals to 2, the metric is referred to as chrF++,\n 'beta' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... 
lowercase=True)\n >>> print(results)\n {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ChrF(datasets.Metric):
    """ChrF(++) MT-evaluation metric, delegating to sacrebleu's CHRF implementation.

    NOTE(review): ``_DESCRIPTION``/``_CITATION``/``_KWARGS_DESCRIPTION`` are
    module-level constants defined above this class.
    """

    def _info(self):
        # The CHRF API used below only exists from sacrebleu 1.4.12 onwards.
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order=CHRF.CHAR_ORDER,
        word_order=CHRF.WORD_ORDER,
        beta=CHRF.BETA,
        lowercase=False,
        whitespace=False,
        eps_smoothing=False,
    ):
        """Score ``predictions`` against per-sentence reference lists.

        ``references`` is a list (one entry per prediction) of equal-length
        reference lists; sacrebleu instead wants one list per reference index,
        so the nested list is transposed before scoring.
        """
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)

        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
| 221 | 1 |
import json
import sys
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
with open(_SCREAMING_SNAKE_CASE , encoding="""utf-8""" ) as f:
SCREAMING_SNAKE_CASE = json.load(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = ["""<details>""", """<summary>Show updated benchmarks!</summary>""", """ """]
for benchmark_name in sorted(_SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = results[benchmark_name]
SCREAMING_SNAKE_CASE = benchmark_name.split("""/""" )[-1]
output_md.append(F"""### Benchmark: {benchmark_file_name}""" )
SCREAMING_SNAKE_CASE = """| metric |"""
SCREAMING_SNAKE_CASE = """|--------|"""
SCREAMING_SNAKE_CASE = """| new / old (diff) |"""
for metric_name in sorted(_SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = benchmark_res[metric_name]
SCREAMING_SNAKE_CASE = metric_vals["""new"""]
SCREAMING_SNAKE_CASE = metric_vals.get("""old""" , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = metric_vals.get("""diff""" , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = F""" {new_val:f}""" if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ) else """None"""
if old_val is not None:
val_str += F""" / {old_val:f}""" if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ) else "None"
if dif_val is not None:
val_str += F""" ({dif_val:f})""" if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append("""</details>""" )
with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as f:
f.writelines("""\n""".join(_SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
    # CLI usage: python this_script.py <input_json> <output_md>
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
    format_json_to_md(input_json_file, output_md_file)
| 193 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case : str = AltDiffusionPipeline
__snake_case : int = TEXT_TO_IMAGE_PARAMS
__snake_case : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
__snake_case : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS
__snake_case : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=lowerCamelCase__ ,set_alpha_to_one=lowerCamelCase__ ,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,projection_dim=32 ,intermediate_size=37 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=5002 ,)
SCREAMING_SNAKE_CASE = CLIPTextModel(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
SCREAMING_SNAKE_CASE = 77
SCREAMING_SNAKE_CASE = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : Dict ,lowerCamelCase__ : int=0 ) -> Any:
'''simple docstring'''
if str(lowerCamelCase__ ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE = torch.manual_seed(lowerCamelCase__ )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def SCREAMING_SNAKE_CASE__(self) -> int:
    """Fast CPU smoke test: swap in the Roberta-series text encoder, run the
    pipeline on a custom prompt and compare a 3x3 corner slice of the output
    image against a recorded reference.

    Note: the upstream-mangled version bound every local to one throwaway
    name, so ``components``/``text_encoder``/``alt_pipe``/... were undefined
    when read; the distinct local names below restore the intended flow.
    """
    device = "cpu"  # ensure determinism for the device-dependent torch.Generator
    components = self.get_dummy_components()
    torch.manual_seed(0)
    text_encoder_config = RobertaSeriesConfig(
        hidden_size=32,
        project_dim=32,
        intermediate_size=37,
        layer_norm_eps=1e-05,
        num_attention_heads=4,
        num_hidden_layers=5,
        vocab_size=5002,
    )
    # TODO: remove after fixing the non-deterministic text encoder
    text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
    components["text_encoder"] = text_encoder
    alt_pipe = AltDiffusionPipeline(**components)
    alt_pipe = alt_pipe.to(device)
    alt_pipe.set_progress_bar_config(disable=None)
    inputs = self.get_dummy_inputs(device)
    inputs["prompt"] = "A photo of an astronaut"
    output = alt_pipe(**inputs)
    image = output.images
    image_slice = image[0, -3:, -3:, -1]
    assert image.shape == (1, 64, 64, 3)
    expected_slice = np.array(
        [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
    )
    assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def SCREAMING_SNAKE_CASE__(self) -> str:
    """Fast CPU smoke test with the PNDM scheduler swapped in, comparing a
    3x3 corner slice of the output image against a recorded reference.

    Note: the upstream-mangled version bound every local to one throwaway
    name, so ``components``/``text_encoder``/``alt_pipe``/... were undefined
    when read; the distinct local names below restore the intended flow.
    """
    device = "cpu"  # ensure determinism for the device-dependent torch.Generator
    components = self.get_dummy_components()
    components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
    torch.manual_seed(0)
    text_encoder_config = RobertaSeriesConfig(
        hidden_size=32,
        project_dim=32,
        intermediate_size=37,
        layer_norm_eps=1e-05,
        num_attention_heads=4,
        num_hidden_layers=5,
        vocab_size=5002,
    )
    # TODO: remove after fixing the non-deterministic text encoder
    text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
    components["text_encoder"] = text_encoder
    alt_pipe = AltDiffusionPipeline(**components)
    alt_pipe = alt_pipe.to(device)
    alt_pipe.set_progress_bar_config(disable=None)
    inputs = self.get_dummy_inputs(device)
    output = alt_pipe(**inputs)
    image = output.images
    image_slice = image[0, -3:, -3:, -1]
    assert image.shape == (1, 64, 64, 3)
    expected_slice = np.array(
        [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
    )
    assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class UpperCamelCase__(unittest.TestCase):
    """Slow GPU integration tests for the AltDiffusion pipeline.

    NOTE(review): the upstream-mangled version gave all three methods the
    same name, so only the last definition survived on the class; they are
    restored here to their conventional unittest names (`tearDown` plus two
    `test_*` methods) so the framework discovers and runs them again.
    """

    def tearDown(self) -> None:
        # Release references and GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self) -> None:
        """Full 20-step run against the published checkpoint."""
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        # `torch_device` is presumably the testing-utils global imported at
        # the top of the file — TODO confirm against the file header.
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self) -> None:
        """Two-step run with the DDIM scheduler swapped in."""
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 193 | 1 |
'''simple docstring'''
from collections.abc import Sequence
def lowerCamelCase(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Return the maximum contiguous-subarray sum (Kadane's algorithm).

    Args:
        arr: sequence of numbers; may be empty.
        allow_empty_subarrays: when True, the empty subarray (sum 0) is a
            valid candidate, so an all-negative input yields 0.

    Returns:
        The largest sum over contiguous subarrays, or 0 for empty input.

    Note: the upstream-mangled signature duplicated the parameter name (a
    SyntaxError) while the body already read ``arr`` and
    ``allow_empty_subarrays``; the signature is restored to match the body.
    """
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # Either extend the running subarray or restart it at the current
        # element (or at the empty subarray when that is allowed).
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # NOTE(review): the original printed `max_subarray_sum(nums)` with both
    # names undefined here; call the function defined directly above instead.
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{lowerCamelCase(nums) = }")
| 58 |
'''simple docstring'''
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def lowerCamelCase(__lowerCamelCase: str) -> str:
    """Extract the class label from an image path such as 'dir/beagle_32.jpg'.

    The label is everything in the file's basename before the trailing
    '_<index>.jpg' suffix.

    Note: the upstream-mangled body read an undefined ``fname`` and matched
    the regex against the full path, which would leak directory components
    into the label; it now matches against the basename only.
    """
    fname = __lowerCamelCase.split(os.path.sep)[-1]
    return re.search(R"^(.*)_\d+\.jpg$", fname).groups()[0]
class a_(snake_case_):
    """Dataset of pet images whose class label is encoded in the file name.

    Note: the upstream-mangled __init__ declared three parameters all named
    ``A`` (a SyntaxError) and dropped the ``self.*`` assignment targets; the
    names are restored from the body and from the keyword arguments used at
    the call sites (``image_transform=``, ``label_to_id=``).
    """

    def __init__(self, file_names, image_transform=None, label_to_id=None):
        # file_names: list of image paths.
        # image_transform: optional callable applied to the loaded PIL image.
        # label_to_id: optional mapping from label string to integer id.
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def lowerCamelCase ( __lowerCamelCase : Any , __lowerCamelCase : Tuple ) ->str:
    """Fine-tune a frozen-backbone timm resnet50d on the pets data with 🤗 Accelerate.

    NOTE(review): upstream name-mangling duplicated the parameter name (a
    SyntaxError) and bound every assignment target to `_SCREAMING_SNAKE_CASE`,
    so names the body later reads (`args`, `config`, `accelerator`, `lr`,
    `model`, `optimizer`, ...) are undefined as written — presumably they were
    the individual names seen on the right-hand sides; restore them before
    running. The comments below document the intended flow only.
    """
    # Initialize accelerator
    if args.with_tracking:
        _SCREAMING_SNAKE_CASE = Accelerator(
            cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="""all""" , project_dir=args.project_dir )
    else:
        _SCREAMING_SNAKE_CASE = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    _SCREAMING_SNAKE_CASE = config["""lr"""]
    _SCREAMING_SNAKE_CASE = int(config["""num_epochs"""] )
    _SCREAMING_SNAKE_CASE = int(config["""seed"""] )
    _SCREAMING_SNAKE_CASE = int(config["""batch_size"""] )
    _SCREAMING_SNAKE_CASE = config["""image_size"""]
    if not isinstance(__lowerCamelCase , (list, tuple) ):
        _SCREAMING_SNAKE_CASE = (image_size, image_size)
    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps , """isdigit""" ):
        if args.checkpointing_steps == "epoch":
            _SCREAMING_SNAKE_CASE = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            _SCREAMING_SNAKE_CASE = int(args.checkpointing_steps )
        else:
            raise ValueError(
                F'Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.' )
    else:
        _SCREAMING_SNAKE_CASE = None
    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        _SCREAMING_SNAKE_CASE = os.path.split(__lowerCamelCase )[-1].split(""".""" )[0]
        accelerator.init_trackers(__lowerCamelCase , __lowerCamelCase )
    # Grab all the image filenames
    _SCREAMING_SNAKE_CASE = [os.path.join(args.data_dir , __lowerCamelCase ) for fname in os.listdir(args.data_dir ) if fname.endswith(""".jpg""" )]
    # Build the label correspondences
    _SCREAMING_SNAKE_CASE = [extract_label(__lowerCamelCase ) for fname in file_names]
    _SCREAMING_SNAKE_CASE = list(set(__lowerCamelCase ) )
    id_to_label.sort()
    _SCREAMING_SNAKE_CASE = {lbl: i for i, lbl in enumerate(__lowerCamelCase )}
    # Set the seed before splitting the data.
    np.random.seed(__lowerCamelCase )
    torch.manual_seed(__lowerCamelCase )
    torch.cuda.manual_seed_all(__lowerCamelCase )
    # Split our filenames between train and validation
    _SCREAMING_SNAKE_CASE = np.random.permutation(len(__lowerCamelCase ) )
    _SCREAMING_SNAKE_CASE = int(0.8 * len(__lowerCamelCase ) )
    _SCREAMING_SNAKE_CASE = random_perm[:cut]
    _SCREAMING_SNAKE_CASE = random_perm[cut:]
    # For training we use a simple RandomResizedCrop
    _SCREAMING_SNAKE_CASE = Compose([RandomResizedCrop(__lowerCamelCase , scale=(0.5, 1.0) ), ToTensor()] )
    _SCREAMING_SNAKE_CASE = PetsDataset(
        [file_names[i] for i in train_split] , image_transform=__lowerCamelCase , label_to_id=__lowerCamelCase )
    # For evaluation, we use a deterministic Resize
    _SCREAMING_SNAKE_CASE = Compose([Resize(__lowerCamelCase ), ToTensor()] )
    _SCREAMING_SNAKE_CASE = PetsDataset([file_names[i] for i in eval_split] , image_transform=__lowerCamelCase , label_to_id=__lowerCamelCase )
    # Instantiate dataloaders.
    _SCREAMING_SNAKE_CASE = DataLoader(__lowerCamelCase , shuffle=__lowerCamelCase , batch_size=__lowerCamelCase , num_workers=4 )
    _SCREAMING_SNAKE_CASE = DataLoader(__lowerCamelCase , shuffle=__lowerCamelCase , batch_size=__lowerCamelCase , num_workers=4 )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    _SCREAMING_SNAKE_CASE = create_model("""resnet50d""" , pretrained=__lowerCamelCase , num_classes=len(__lowerCamelCase ) )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    _SCREAMING_SNAKE_CASE = model.to(accelerator.device )
    # Freezing the base model
    for param in model.parameters():
        _SCREAMING_SNAKE_CASE = False
    for param in model.get_classifier().parameters():
        _SCREAMING_SNAKE_CASE = True
    # We normalize the batches of images to be a bit faster.
    _SCREAMING_SNAKE_CASE = torch.tensor(model.default_cfg["""mean"""] )[None, :, None, None].to(accelerator.device )
    _SCREAMING_SNAKE_CASE = torch.tensor(model.default_cfg["""std"""] )[None, :, None, None].to(accelerator.device )
    # Instantiate optimizer
    _SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
    # Instantiate learning rate scheduler
    _SCREAMING_SNAKE_CASE = OneCycleLR(optimizer=__lowerCamelCase , max_lr=__lowerCamelCase , epochs=__lowerCamelCase , steps_per_epoch=len(__lowerCamelCase ) )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = accelerator.prepare(
        __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
    # We need to keep track of how many total steps we have iterated over
    _SCREAMING_SNAKE_CASE = 0
    # We also need to keep track of the starting epoch so files are named properly
    _SCREAMING_SNAKE_CASE = 0
    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        # NOTE(review): the `or` below makes this branch always taken once the
        # outer `if` is true, so the auto-discovery `else` is unreachable.
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(F'Resumed from checkpoint: {args.resume_from_checkpoint}' )
            accelerator.load_state(args.resume_from_checkpoint )
            _SCREAMING_SNAKE_CASE = os.path.basename(args.resume_from_checkpoint )
        else:
            # Get the most recent checkpoint
            _SCREAMING_SNAKE_CASE = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
            dirs.sort(key=os.path.getctime )
            _SCREAMING_SNAKE_CASE = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        _SCREAMING_SNAKE_CASE = os.path.splitext(__lowerCamelCase )[0]
        if "epoch" in training_difference:
            _SCREAMING_SNAKE_CASE = int(training_difference.replace("""epoch_""" , """""" ) ) + 1
            _SCREAMING_SNAKE_CASE = None
        else:
            _SCREAMING_SNAKE_CASE = int(training_difference.replace("""step_""" , """""" ) )
            _SCREAMING_SNAKE_CASE = resume_step // len(__lowerCamelCase )
            resume_step -= starting_epoch * len(__lowerCamelCase )
    # Now we train the model
    for epoch in range(__lowerCamelCase , __lowerCamelCase ):
        model.train()
        if args.with_tracking:
            _SCREAMING_SNAKE_CASE = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            _SCREAMING_SNAKE_CASE = accelerator.skip_first_batches(__lowerCamelCase , __lowerCamelCase )
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            _SCREAMING_SNAKE_CASE = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            _SCREAMING_SNAKE_CASE = {k: v.to(accelerator.device ) for k, v in batch.items()}
            _SCREAMING_SNAKE_CASE = (batch["""image"""] - mean) / std
            _SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
            _SCREAMING_SNAKE_CASE = torch.nn.functional.cross_entropy(__lowerCamelCase , batch["""label"""] )
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(__lowerCamelCase )
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(__lowerCamelCase , __lowerCamelCase ):
                _SCREAMING_SNAKE_CASE = F'step_{overall_step}'
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        _SCREAMING_SNAKE_CASE = os.path.join(args.output_dir , __lowerCamelCase )
                    accelerator.save_state(__lowerCamelCase )
        model.eval()
        _SCREAMING_SNAKE_CASE = 0
        _SCREAMING_SNAKE_CASE = 0
        for step, batch in enumerate(__lowerCamelCase ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            _SCREAMING_SNAKE_CASE = {k: v.to(accelerator.device ) for k, v in batch.items()}
            _SCREAMING_SNAKE_CASE = (batch["""image"""] - mean) / std
            with torch.no_grad():
                _SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
            _SCREAMING_SNAKE_CASE = outputs.argmax(dim=-1 )
            _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((predictions, batch["""label"""]) )
            _SCREAMING_SNAKE_CASE = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()
        _SCREAMING_SNAKE_CASE = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'epoch {epoch}: {100 * eval_metric:.2f}' )
        if args.with_tracking:
            accelerator.log(
                {
                    """accuracy""": 100 * eval_metric,
                    """train_loss""": total_loss.item() / len(__lowerCamelCase ),
                    """epoch""": epoch,
                } , step=__lowerCamelCase , )
        if checkpointing_steps == "epoch":
            _SCREAMING_SNAKE_CASE = F'epoch_{epoch}'
            if args.output_dir is not None:
                _SCREAMING_SNAKE_CASE = os.path.join(args.output_dir , __lowerCamelCase )
            accelerator.save_state(__lowerCamelCase )
    if args.with_tracking:
        accelerator.end_training()
def lowerCamelCase() -> int:
    """Parse command-line arguments and launch training.

    Note: the upstream-mangled body bound every assignment to one throwaway
    name and replaced the `type=`/`default=`/`required=` argument values with
    an undefined name; the conventional argparse values (str/None/True/"."
    /"logs") are restored here to match each option's help text.
    `training_function` is presumably the training entry point defined
    earlier in this file under a mangled name — TODO confirm.
    """
    parser = argparse.ArgumentParser(description="""Simple example of training script.""")
    parser.add_argument("""--data_dir""", required=True, help="""The data folder on disk.""")
    parser.add_argument("""--fp16""", action="""store_true""", help="""If passed, will use FP16 training.""")
    parser.add_argument(
        """--mixed_precision""",
        type=str,
        default=None,
        choices=["""no""", """fp16""", """bf16""", """fp8"""],
        help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""",
    )
    parser.add_argument("""--cpu""", action="""store_true""", help="""If passed, will train on the CPU.""")
    parser.add_argument(
        """--checkpointing_steps""",
        type=str,
        default=None,
        help="""Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.""",
    )
    parser.add_argument(
        """--output_dir""",
        type=str,
        default=""".""",
        help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""",
    )
    parser.add_argument(
        """--resume_from_checkpoint""",
        type=str,
        default=None,
        help="""If the training should continue from a checkpoint folder.""",
    )
    parser.add_argument(
        """--with_tracking""",
        action="""store_true""",
        help="""Whether to load in all available experiment trackers from the environment and use them for logging.""",
    )
    parser.add_argument(
        """--project_dir""",
        type=str,
        default="""logs""",
        help="""Location on where to store experiment tracking logs` and relevent project information""",
    )
    args = parser.parse_args()
    config = {"""lr""": 3e-2, """num_epochs""": 3, """seed""": 42, """batch_size""": 64, """image_size""": 224}
    training_function(config, args)
if __name__ == "__main__":
    # NOTE(review): the original called an undefined `main()`; the
    # argument-parsing entry point defined directly above is (after upstream
    # renaming) `lowerCamelCase`.
    lowerCamelCase()
| 58 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.
# NOTE(review): both bindings below were mangled to the same name, so the
# logger is immediately shadowed by the checkpoint map — presumably they were
# `logger` and a `*_PRETRAINED_CONFIG_ARCHIVE_MAP`; confirm before relying on
# either.
lowerCamelCase : Tuple =logging.get_logger(__name__)
# Canonical checkpoint name -> hosted config-file URL.
lowerCamelCase : Tuple ={
    '''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class __a(A__):
    """Configuration for the RoCBert model (`model_type` ``roc_bert``).

    Carries the usual BERT-style text-config fields plus RoCBert's
    pronunciation/shape embedding options.

    Note: the upstream-mangled __init__ declared every parameter with the
    same name (a SyntaxError) and dropped the ``self.*`` assignment targets;
    parameter names/defaults are restored from the right-hand sides and the
    positional default values, and the stray dataset residue fused onto the
    last line is removed.
    """

    _lowerCAmelCase: str = '''roc_bert'''

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> int:
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ), f'The input value of [n={number}] is not an integer'
if number == 1:
return 2
elif number < 1:
UpperCamelCase__ : List[Any] = f'The input value of [n={number}] has to be > 0'
raise ValueError(__lowerCAmelCase )
else:
UpperCamelCase__ : Optional[Any] = sylvester(number - 1 )
UpperCamelCase__ : str = num - 1
UpperCamelCase__ : int = num
return lower * upper + 1
if __name__ == "__main__":
print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""") | 196 | 0 |
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
# Module-level logger (the binding name was mangled upstream; behaves as the
# usual per-module transformers logger).
A_ = logging.get_logger(__name__)
class lowercase( __a ):
    """Deprecated drop-in replacement for the base Trainer.

    Note: the upstream-mangled __init__ declared both parameters as ``a_``
    (a SyntaxError) and passed that same name as the warning category; the
    conventional ``args``/``**kwargs`` signature and a ``FutureWarning``
    category are restored.
    """

    def __init__(self, args=None, **kwargs):
        # Emit the deprecation notice, then defer entirely to the parent.
        warnings.warn(
            """`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
            """instead.""",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 64 |
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase( __a , unittest.TestCase ):
    """Shared tokenizer test-suite run against the CodeGen tokenizers.

    NOTE(review): upstream name-mangling rebound every class attribute to
    `lowercase__`, every local to `_snake_case`, and every argument to `a_`,
    so only the last attribute binding survives and most locals the bodies
    read are undefined as written. The docstrings below describe the
    intended behaviour only — restore distinct names before running.
    """
    lowercase__ = CodeGenTokenizer
    lowercase__ = CodeGenTokenizerFast
    lowercase__ = True
    lowercase__ = {"add_prefix_space": True}
    lowercase__ = False

    def UpperCamelCase_ ( self: Tuple ):
        """Write a minimal BPE vocab/merges fixture into the temp dir."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        _snake_case : Tuple = [
            """l""",
            """o""",
            """w""",
            """e""",
            """r""",
            """s""",
            """t""",
            """i""",
            """d""",
            """n""",
            """\u0120""",
            """\u0120l""",
            """\u0120n""",
            """\u0120lo""",
            """\u0120low""",
            """er""",
            """\u0120lowest""",
            """\u0120newer""",
            """\u0120wider""",
            """<unk>""",
            """<|endoftext|>""",
        ]
        _snake_case : Tuple = dict(zip(a_, range(len(a_ ) ) ) )
        _snake_case : str = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        _snake_case : List[Any] = {"""unk_token""": """<unk>"""}
        _snake_case : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] )
        _snake_case : Optional[Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file, """w""", encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(a_ ) + """\n""" )
        with open(self.merges_file, """w""", encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(a_ ) )

    def UpperCamelCase_ ( self: Any, **a_: int ):
        """Instantiate a slow CodeGen tokenizer from the on-disk fixture."""
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **a_ )

    def UpperCamelCase_ ( self: Any, **a_: str ):
        """Instantiate a fast CodeGen tokenizer from the on-disk fixture."""
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **a_ )

    def UpperCamelCase_ ( self: Union[str, Any], a_: Dict ):
        """Provide the (input, expected output) text pair used by the mixin."""
        _snake_case : Union[str, Any] = """lower newer"""
        _snake_case : Tuple = """lower newer"""
        return input_text, output_text

    def UpperCamelCase_ ( self: int ):
        """Check tokenization and token->id conversion of the slow tokenizer."""
        _snake_case : Union[str, Any] = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
        _snake_case : Optional[Any] = """lower newer"""
        _snake_case : Optional[int] = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
        _snake_case : int = tokenizer.tokenize(a_, add_prefix_space=a_ )
        self.assertListEqual(a_, a_ )
        _snake_case : str = tokens + [tokenizer.unk_token]
        _snake_case : Optional[int] = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ), a_ )

    def UpperCamelCase_ ( self: Union[str, Any] ):
        """Check the slow and fast tokenizers agree on tokens, ids and <unk>."""
        if not self.test_rust_tokenizer:
            return
        _snake_case : int = self.get_tokenizer()
        _snake_case : int = self.get_rust_tokenizer(add_prefix_space=a_ )
        _snake_case : Dict = """lower newer"""
        # Testing tokenization
        _snake_case : Dict = tokenizer.tokenize(a_, add_prefix_space=a_ )
        _snake_case : List[str] = rust_tokenizer.tokenize(a_ )
        self.assertListEqual(a_, a_ )
        # Testing conversion to ids without special tokens
        _snake_case : Optional[Any] = tokenizer.encode(a_, add_special_tokens=a_, add_prefix_space=a_ )
        _snake_case : Tuple = rust_tokenizer.encode(a_, add_special_tokens=a_ )
        self.assertListEqual(a_, a_ )
        # Testing conversion to ids with special tokens
        _snake_case : Tuple = self.get_rust_tokenizer(add_prefix_space=a_ )
        _snake_case : int = tokenizer.encode(a_, add_prefix_space=a_ )
        _snake_case : Optional[Any] = rust_tokenizer.encode(a_ )
        self.assertListEqual(a_, a_ )
        # Testing the unknown token
        _snake_case : Tuple = tokens + [rust_tokenizer.unk_token]
        _snake_case : List[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a_ ), a_ )

    def UpperCamelCase_ ( self: Dict, *a_: Dict, **a_: int ):
        """Intentionally disabled mixin test (not applicable to CodeGen).

        NOTE(review): `*a_, **a_` duplicates the parameter name, which is a
        SyntaxError as written.
        """
        pass

    def UpperCamelCase_ ( self: int, a_: List[Any]=15 ):
        """Padding must raise for tokenizers configured without a pad token."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                _snake_case : List[Any] = self.rust_tokenizer_class.from_pretrained(a_, **a_ )
                # Simple input
                _snake_case : Any = """This is a simple input"""
                _snake_case : Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""]
                _snake_case : Optional[int] = ("""This is a simple input""", """This is a pair""")
                _snake_case : Optional[Any] = [
                    ("""This is a simple input 1""", """This is a simple input 2"""),
                    ("""This is a simple pair 1""", """This is a simple pair 2"""),
                ]
                # Simple input tests
                self.assertRaises(a_, tokenizer_r.encode, a_, max_length=a_, padding="""max_length""" )
                # Simple input
                self.assertRaises(a_, tokenizer_r.encode_plus, a_, max_length=a_, padding="""max_length""" )
                # Simple input
                self.assertRaises(
                    a_, tokenizer_r.batch_encode_plus, a_, max_length=a_, padding="""max_length""", )
                # Pair input
                self.assertRaises(a_, tokenizer_r.encode, a_, max_length=a_, padding="""max_length""" )
                # Pair input
                self.assertRaises(a_, tokenizer_r.encode_plus, a_, max_length=a_, padding="""max_length""" )
                # Pair input
                self.assertRaises(
                    a_, tokenizer_r.batch_encode_plus, a_, max_length=a_, padding="""max_length""", )

    def UpperCamelCase_ ( self: Optional[Any] ):
        """Check max_length and automatic padding once a pad token is set."""
        _snake_case : List[str] = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="""<pad>""" )
        # Simple input
        _snake_case : List[Any] = """This is a simple input"""
        _snake_case : int = ["""This is a simple input looooooooong""", """This is a simple input"""]
        _snake_case : Any = ("""This is a simple input""", """This is a pair""")
        _snake_case : str = [
            ("""This is a simple input loooooong""", """This is a simple input"""),
            ("""This is a simple pair loooooong""", """This is a simple pair"""),
        ]
        _snake_case : str = tokenizer.pad_token_id
        _snake_case : Optional[int] = tokenizer(a_, padding="""max_length""", max_length=30, return_tensors="""np""" )
        _snake_case : Dict = tokenizer(a_, padding=a_, truncate=a_, return_tensors="""np""" )
        _snake_case : Tuple = tokenizer(*a_, padding="""max_length""", max_length=60, return_tensors="""np""" )
        _snake_case : Optional[Any] = tokenizer(a_, padding=a_, truncate=a_, return_tensors="""np""" )
        # s
        # test single string max_length padding
        self.assertEqual(out_s["""input_ids"""].shape[-1], 30 )
        self.assertTrue(pad_token_id in out_s["""input_ids"""] )
        self.assertTrue(0 in out_s["""attention_mask"""] )
        # s2
        # test automatic padding
        self.assertEqual(out_sa["""input_ids"""].shape[-1], 33 )
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
        self.assertFalse(0 in out_sa["""attention_mask"""][0] )
        # short slice does have padding
        self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
        self.assertTrue(0 in out_sa["""attention_mask"""][1] )
        # p
        # test single pair max_length padding
        self.assertEqual(out_p["""input_ids"""].shape[-1], 60 )
        self.assertTrue(pad_token_id in out_p["""input_ids"""] )
        self.assertTrue(0 in out_p["""attention_mask"""] )
        # p2
        # test automatic padding pair
        self.assertEqual(out_pa["""input_ids"""].shape[-1], 52 )
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
        self.assertFalse(0 in out_pa["""attention_mask"""][0] )
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
        self.assertTrue(0 in out_pa["""attention_mask"""][1] )

    def UpperCamelCase_ ( self: Union[str, Any] ):
        """Check an explicit BOS token is prepended when add_bos_token is set."""
        _snake_case : Tuple = """$$$"""
        _snake_case : List[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=a_, add_bos_token=a_ )
        _snake_case : str = """This is a simple input"""
        _snake_case : int = ["""This is a simple input 1""", """This is a simple input 2"""]
        _snake_case : Union[str, Any] = tokenizer.bos_token_id
        _snake_case : Tuple = tokenizer(a_ )
        _snake_case : Optional[Any] = tokenizer(a_ )
        self.assertEqual(out_s.input_ids[0], a_ )
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
        _snake_case : Optional[int] = tokenizer.decode(out_s.input_ids )
        _snake_case : int = tokenizer.batch_decode(out_sa.input_ids )
        self.assertEqual(decode_s.split()[0], a_ )
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )

    @slow
    def UpperCamelCase_ ( self: str ):
        """Check decode() truncates generated code at the configured patterns."""
        _snake_case : Optional[int] = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" )
        _snake_case : Dict = """\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#"""
        _snake_case : Union[str, Any] = """\nif len_a > len_b: result = a\nelse: result = b"""
        _snake_case : Optional[Any] = tokenizer.encode(a_ )
        _snake_case : Dict = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""]
        _snake_case : Optional[Any] = tokenizer.decode(a_, truncate_before_pattern=a_ )
        self.assertEqual(a_, a_ )

    def UpperCamelCase_ ( self: str ):
        """Intentionally disabled mixin test (not applicable to CodeGen)."""
        pass
| 64 | 1 |
from math import log2


def lowerCAmelCase__(a__: int) -> int:
    """Return the bit index of the lowest set bit of ``a__`` (0 for input 0).

    Args:
        a__: non-negative integer.

    Raises:
        TypeError: if ``a__`` is not an int.
        ValueError: if ``a__`` is negative.

    Note: the upstream-mangled version imported the non-existent
    ``math.loga``, tested ``isinstance(a__, a__)`` (always an error), and
    read an undefined name ``a``; all three are repaired here.
    """
    # Type check first: comparing a non-int against 0 could itself raise.
    if not isinstance(a__, int):
        raise TypeError('Input value must be a \'int\' type')
    if a__ < 0:
        raise ValueError('Input value must be a positive integer')
    # a__ & -a__ isolates the lowest set bit; its log2 is the bit index.
    return 0 if a__ == 0 else int(log2(a__ & -a__))
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 367 |
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class __a(UpperCAmelCase):
    """Speech processor pairing an MCTCT feature extractor with a tokenizer.

    ``__call__``/``pad`` dispatch audio-like inputs to the feature extractor
    and text-like inputs to the tokenizer, in the usual Wav2Vec2-style
    processor contract.

    Note: the upstream-mangled version declared ``*x, **x`` parameter pairs
    with one shared name (a SyntaxError), dropped every assignment target,
    and gave all methods the same name so they clobbered each other; the
    conventional ProcessorMixin attribute and method names are restored.
    """

    feature_extractor_class = 'MCTCTFeatureExtractor'
    tokenizer_class = 'AutoTokenizer'

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        # Default target of __call__/pad; swapped by as_target_processor().
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """Prepare audio and/or text; returns features, encodings, or
        features with ``labels`` when both are supplied."""
        # For backward compatibility, forward everything while inside the
        # as_target_processor() context.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
            audio = kwargs.pop('raw_speech')
        else:
            audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        """Pad audio features and/or label ids, mirroring __call__'s dispatch."""
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop('input_features', None)
        labels = kwargs.pop('labels', None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features['labels'] = labels['input_ids']
            return input_features

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Deprecated: temporarily route __call__/pad to the tokenizer for
        preparing label text."""
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 185 | 0 |
def _a ( SCREAMING_SNAKE_CASE : int | float | str ):
"""simple docstring"""
try:
UpperCamelCase__ : List[Any] = float(SCREAMING_SNAKE_CASE )
except ValueError:
raise ValueError('''Please enter a valid number''' )
UpperCamelCase__ : Optional[int] = decimal - int(SCREAMING_SNAKE_CASE )
if fractional_part == 0:
return int(SCREAMING_SNAKE_CASE ), 1
else:
UpperCamelCase__ : int = len(str(SCREAMING_SNAKE_CASE ).split('''.''' )[1] )
UpperCamelCase__ : str = int(decimal * (10**number_of_frac_digits) )
UpperCamelCase__ : Dict = 10**number_of_frac_digits
UpperCamelCase__ , UpperCamelCase__ : Any = denominator, numerator
while True:
UpperCamelCase__ : Optional[Any] = dividend % divisor
if remainder == 0:
break
UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] = divisor, remainder
UpperCamelCase__ , UpperCamelCase__ : str = numerator / divisor, denominator / divisor
return int(SCREAMING_SNAKE_CASE ), int(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
    # Demonstrate the converter on a few inputs.
    # NOTE(review): the original guard called an undefined name
    # `decimal_to_fraction`; the function defined in this module is `_a`.
    print(f"{_a(2) = }")
    print(f"{_a(89.0) = }")
    print(f"{_a('67') = }")
    print(f"{_a('45.0') = }")
    print(f"{_a(1.5) = }")
    print(f"{_a('6.25') = }")
    try:
        print(f"{_a('78td') = }")
    except ValueError as exc:
        # A non-numeric string is rejected; show the error instead of crashing.
        print(f"_a('78td') raised ValueError: {exc}")
| 146 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
# Unit tests for TvltProcessor: save/load round-trips, delegation of audio to
# the feature extractor and of images to the image processor, and the combined
# model input names.
#
# NOTE(review): this class shows automated-rename damage — every method is
# named `UpperCAmelCase__` (later defs shadow earlier ones and unittest
# discovers none of them as tests), and results are bound to a throwaway
# local `UpperCamelCase__` while later lines read `self.checkpoint`,
# `self.tmpdirname`, `processor`, `audio_dict`, `input_processor`, … that are
# never assigned. Restore the upstream names (setUp, tearDown,
# get_image_processor, get_feature_extractor, test_*) before relying on it.
@require_torch
class __magic_name__ ( unittest.TestCase):
    # Intended as setUp: checkpoint id and a temporary working directory.
    def UpperCAmelCase__ ( self : List[Any] ) -> Union[str, Any]:
        '''simple docstring'''
        UpperCamelCase__ : Optional[int] = '''ZinengTang/tvlt-base'''
        UpperCamelCase__ : int = tempfile.mkdtemp()
    # Helper: load a TvltImageProcessor from the test checkpoint.
    def UpperCAmelCase__ ( self : int , **lowerCamelCase__ : List[str] ) -> List[Any]:
        '''simple docstring'''
        return TvltImageProcessor.from_pretrained(self.checkpoint , **lowerCamelCase__ )
    # Helper: load a TvltFeatureExtractor from the test checkpoint.
    def UpperCAmelCase__ ( self : Optional[Any] , **lowerCamelCase__ : Tuple ) -> List[Any]:
        '''simple docstring'''
        return TvltFeatureExtractor.from_pretrained(self.checkpoint , **lowerCamelCase__ )
    # Intended as tearDown: remove the temporary directory.
    def UpperCAmelCase__ ( self : str ) -> Tuple:
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
    # Save a processor and reload it; both components should keep their types.
    def UpperCAmelCase__ ( self : Any ) -> int:
        '''simple docstring'''
        UpperCamelCase__ : int = self.get_image_processor()
        UpperCamelCase__ : Union[str, Any] = self.get_feature_extractor()
        UpperCamelCase__ : List[str] = TvltProcessor(image_processor=lowerCamelCase__ , feature_extractor=lowerCamelCase__ )
        processor.save_pretrained(self.tmpdirname )
        UpperCamelCase__ : Optional[int] = TvltProcessor.from_pretrained(self.tmpdirname )
        self.assertIsInstance(processor.feature_extractor , lowerCamelCase__ )
        self.assertIsInstance(processor.image_processor , lowerCamelCase__ )
    # Audio path: processor(audio=...) must match the feature extractor output.
    def UpperCAmelCase__ ( self : List[Any] ) -> Tuple:
        '''simple docstring'''
        UpperCamelCase__ : str = self.get_image_processor()
        UpperCamelCase__ : List[Any] = self.get_feature_extractor()
        UpperCamelCase__ : Dict = TvltProcessor(image_processor=lowerCamelCase__ , feature_extractor=lowerCamelCase__ )
        UpperCamelCase__ : Any = np.ones([12000] )
        UpperCamelCase__ : Union[str, Any] = feature_extractor(lowerCamelCase__ , return_tensors='''np''' )
        UpperCamelCase__ : Any = processor(audio=lowerCamelCase__ , return_tensors='''np''' )
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
    # Image path: processor(images=...) must match the image processor output.
    def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
        '''simple docstring'''
        UpperCamelCase__ : List[Any] = self.get_image_processor()
        UpperCamelCase__ : Any = self.get_feature_extractor()
        UpperCamelCase__ : int = TvltProcessor(image_processor=lowerCamelCase__ , feature_extractor=lowerCamelCase__ )
        UpperCamelCase__ : int = np.ones([3, 224, 224] )
        UpperCamelCase__ : List[str] = image_processor(lowerCamelCase__ , return_tensors='''np''' )
        UpperCamelCase__ : str = processor(images=lowerCamelCase__ , return_tensors='''np''' )
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
    # Combined call: expects audio + image keys; no input at all must raise.
    def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]:
        '''simple docstring'''
        UpperCamelCase__ : Any = self.get_image_processor()
        UpperCamelCase__ : Dict = self.get_feature_extractor()
        UpperCamelCase__ : Union[str, Any] = TvltProcessor(image_processor=lowerCamelCase__ , feature_extractor=lowerCamelCase__ )
        UpperCamelCase__ : List[str] = np.ones([12000] )
        UpperCamelCase__ : Tuple = np.ones([3, 224, 224] )
        UpperCamelCase__ : Optional[Any] = processor(audio=lowerCamelCase__ , images=lowerCamelCase__ )
        self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
        # test if it raises when no input is passed
        with pytest.raises(lowerCamelCase__ ):
            processor()
    # model_input_names must be the union of both components' input names.
    def UpperCAmelCase__ ( self : Dict ) -> int:
        '''simple docstring'''
        UpperCamelCase__ : List[str] = self.get_image_processor()
        UpperCamelCase__ : str = self.get_feature_extractor()
        UpperCamelCase__ : Tuple = TvltProcessor(image_processor=lowerCamelCase__ , feature_extractor=lowerCamelCase__ )
        self.assertListEqual(
            processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
| 146 | 1 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
# Configuration/model-builder helper for the Bit test suite: produces small
# BitConfig instances and exercises BitModel, BitForImageClassification and
# BitBackbone against expected output shapes.
#
# NOTE(review): automated-rename damage — __init__ declares every parameter
# as `_UpperCamelCase` (duplicate argument names are a SyntaxError), assigns
# to a throwaway local `UpperCAmelCase_` while the methods read
# `self.batch_size`, `self.num_channels`, …, and all six methods share the
# name `__UpperCAmelCase` so later defs shadow earlier ones. Restore the
# upstream names (BitModelTester, prepare_config_and_inputs, get_config,
# create_and_check_model, create_and_check_for_image_classification,
# create_and_check_backbone, prepare_config_and_inputs_for_common).
class lowerCamelCase :
    '''simple docstring'''

    def __init__( self , _UpperCamelCase , _UpperCamelCase=3 , _UpperCamelCase=3_2 , _UpperCamelCase=3 , _UpperCamelCase=1_0 , _UpperCamelCase=[8, 1_6, 3_2, 6_4] , _UpperCamelCase=[1, 1, 2, 1] , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase="relu" , _UpperCamelCase=3 , _UpperCamelCase=None , _UpperCamelCase=["stage2", "stage3", "stage4"] , _UpperCamelCase=[2, 3, 4] , _UpperCamelCase=1 , ) -> Any:
        UpperCAmelCase_ : Optional[int] = parent
        UpperCAmelCase_ : Dict = batch_size
        UpperCAmelCase_ : Optional[Any] = image_size
        UpperCAmelCase_ : Tuple = num_channels
        UpperCAmelCase_ : Optional[Any] = embeddings_size
        UpperCAmelCase_ : Optional[Any] = hidden_sizes
        UpperCAmelCase_ : Tuple = depths
        UpperCAmelCase_ : Tuple = is_training
        UpperCAmelCase_ : Union[str, Any] = use_labels
        UpperCAmelCase_ : str = hidden_act
        UpperCAmelCase_ : List[str] = num_labels
        UpperCAmelCase_ : Optional[int] = scope
        UpperCAmelCase_ : Any = len(_UpperCamelCase )
        UpperCAmelCase_ : List[str] = out_features
        UpperCAmelCase_ : List[Any] = out_indices
        UpperCAmelCase_ : Optional[Any] = num_groups
    # Intended as prepare_config_and_inputs: random pixel values (+ labels).
    def __UpperCAmelCase ( self ) -> Optional[Any]:
        UpperCAmelCase_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCAmelCase_ : Any = None
        if self.use_labels:
            UpperCAmelCase_ : str = ids_tensor([self.batch_size] , self.num_labels )
        UpperCAmelCase_ : Tuple = self.get_config()
        return config, pixel_values, labels
    # Intended as get_config: a BitConfig built from the tester's attributes.
    def __UpperCAmelCase ( self ) -> Optional[int]:
        return BitConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
    # Intended as create_and_check_model: verify last_hidden_state shape.
    def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
        UpperCAmelCase_ : Optional[Any] = BitModel(config=_UpperCamelCase )
        model.to(_UpperCamelCase )
        model.eval()
        UpperCAmelCase_ : int = model(_UpperCamelCase )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
    # Intended as create_and_check_for_image_classification: logits shape.
    def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[Any]:
        UpperCAmelCase_ : Tuple = self.num_labels
        UpperCAmelCase_ : Optional[int] = BitForImageClassification(_UpperCamelCase )
        model.to(_UpperCamelCase )
        model.eval()
        UpperCAmelCase_ : Union[str, Any] = model(_UpperCamelCase , labels=_UpperCamelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    # Intended as create_and_check_backbone: feature maps/channels, with and
    # without out_features.
    def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> int:
        UpperCAmelCase_ : List[Any] = BitBackbone(config=_UpperCamelCase )
        model.to(_UpperCamelCase )
        model.eval()
        UpperCAmelCase_ : str = model(_UpperCamelCase )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        UpperCAmelCase_ : Optional[int] = None
        UpperCAmelCase_ : Optional[int] = BitBackbone(config=_UpperCamelCase )
        model.to(_UpperCamelCase )
        model.eval()
        UpperCAmelCase_ : int = model(_UpperCamelCase )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    # Intended as prepare_config_and_inputs_for_common: config + inputs dict.
    def __UpperCAmelCase ( self ) -> str:
        UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = config_and_inputs
        UpperCAmelCase_ : Union[str, Any] = {'pixel_values': pixel_values}
        return config, inputs_dict
# Main Bit model test class (mixes the common model tester and pipeline
# mixins into unittest.TestCase). Covers config tests, signatures, hidden
# states, backbone, image classification and from_pretrained.
#
# NOTE(review): automated-rename damage as elsewhere in this file — this
# class shadows the tester class above (both are named `lowerCamelCase`),
# the class-level flags are all assigned to the same name `_snake_case`,
# every method is named `__UpperCAmelCase` (so only the last survives and
# unittest discovers no test_* methods), and `BitModelTester` at the bottom
# of this block refers to a name that does not exist under the renames.
@require_torch
class lowerCamelCase (_snake_case , _snake_case , unittest.TestCase ):
    '''simple docstring'''

    _snake_case : Any = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    _snake_case : List[Any] = (
        {'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
        if is_torch_available()
        else {}
    )
    _snake_case : str = False
    _snake_case : Optional[int] = False
    _snake_case : Tuple = False
    _snake_case : Any = False
    _snake_case : int = False
    # Intended as setUp: builds the model tester and the config tester.
    def __UpperCAmelCase ( self ) -> str:
        UpperCAmelCase_ : List[str] = BitModelTester(self )
        UpperCAmelCase_ : Union[str, Any] = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase )
    def __UpperCAmelCase ( self ) -> List[Any]:
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def __UpperCAmelCase ( self ) -> Dict:
        return
    @unittest.skip(reason='Bit does not output attentions' )
    def __UpperCAmelCase ( self ) -> Tuple:
        pass
    @unittest.skip(reason='Bit does not use inputs_embeds' )
    def __UpperCAmelCase ( self ) -> int:
        pass
    @unittest.skip(reason='Bit does not support input and output embeddings' )
    def __UpperCAmelCase ( self ) -> Dict:
        pass
    # forward() must accept `pixel_values` as its first argument.
    def __UpperCAmelCase ( self ) -> int:
        UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase_ : str = model_class(_UpperCamelCase )
            UpperCAmelCase_ : Union[str, Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase_ : List[str] = [*signature.parameters.keys()]
            UpperCAmelCase_ : List[Any] = ['pixel_values']
            self.assertListEqual(arg_names[:1] , _UpperCamelCase )
    def __UpperCAmelCase ( self ) -> str:
        UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_UpperCamelCase )
    def __UpperCAmelCase ( self ) -> Dict:
        UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*_UpperCamelCase )
    # Batch-norm weights must initialize to 1 and biases to 0.
    def __UpperCAmelCase ( self ) -> List[str]:
        UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase_ : Union[str, Any] = model_class(config=_UpperCamelCase )
            for name, module in model.named_modules():
                if isinstance(_UpperCamelCase , (nn.BatchNormad, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
    # Hidden-states count/shape for both layer types, via argument and config.
    def __UpperCAmelCase ( self ) -> str:
        def check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
            UpperCAmelCase_ : Optional[Any] = model_class(_UpperCamelCase )
            model.to(_UpperCamelCase )
            model.eval()
            with torch.no_grad():
                UpperCAmelCase_ : Union[str, Any] = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
            UpperCAmelCase_ : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            UpperCAmelCase_ : Union[str, Any] = self.model_tester.num_stages
            self.assertEqual(len(_UpperCamelCase ) , expected_num_stages + 1 )
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase_ : List[str] = ['preactivation', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                UpperCAmelCase_ : Union[str, Any] = layer_type
                UpperCAmelCase_ : List[Any] = True
                check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                UpperCAmelCase_ : Optional[int] = True
                check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
    @unittest.skip(reason='Bit does not use feedforward chunking' )
    def __UpperCAmelCase ( self ) -> Any:
        pass
    def __UpperCAmelCase ( self ) -> Any:
        UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase )
    @slow
    def __UpperCAmelCase ( self ) -> Dict:
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase_ : int = BitModel.from_pretrained(_UpperCamelCase )
            self.assertIsNotNone(_UpperCamelCase )
def lowercase__ ( ):
    '''Load the standard COCO fixture image used by the integration tests.'''
    # NOTE(review): the original bound the image to a throwaway local and then
    # returned an undefined name `image`; bind and return the same object.
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
# Integration test: run the pretrained BitForImageClassification head on the
# COCO fixture image and compare logits against recorded reference values.
#
# NOTE(review): same rename damage as the rest of the file — this class
# shadows the previous `lowerCamelCase` classes, both methods are named
# `__UpperCAmelCase` (only the last survives, and neither is discovered as a
# test), and several `_UpperCamelCase` references (e.g. the `.to(...)`
# device argument) point at nothing under the renames.
@require_torch
@require_vision
class lowerCamelCase (unittest.TestCase ):
    '''simple docstring'''

    # Intended as the default_image_processor cached property.
    @cached_property
    def __UpperCAmelCase ( self ) -> Union[str, Any]:
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
        )
    # Intended as test_inference_image_classification_head.
    @slow
    def __UpperCAmelCase ( self ) -> Dict:
        UpperCAmelCase_ : int = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_UpperCamelCase )
        UpperCAmelCase_ : List[str] = self.default_image_processor
        UpperCAmelCase_ : List[Any] = prepare_img()
        UpperCAmelCase_ : List[str] = image_processor(images=_UpperCamelCase , return_tensors='pt' ).to(_UpperCamelCase )
        # forward pass
        with torch.no_grad():
            UpperCAmelCase_ : int = model(**_UpperCamelCase )
        # verify the logits
        UpperCAmelCase_ : List[Any] = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , _UpperCamelCase )
        UpperCAmelCase_ : Dict = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(_UpperCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1E-4 ) )
# Backbone-specific test class, driven by the shared BackboneTesterMixin
# (`_snake_case` bases and attributes are the renamed mixin/config hooks).
#
# NOTE(review): the method below is intended as setUp and should assign
# `self.model_tester = BitModelTester(self)`; under the automated renames it
# binds a throwaway local and refers to `BitModelTester`, a name that no
# longer exists (the tester class above was renamed to `lowerCamelCase`).
@require_torch
class lowerCamelCase (_snake_case , unittest.TestCase ):
    '''simple docstring'''

    _snake_case : List[Any] = (BitBackbone,) if is_torch_available() else ()
    _snake_case : Dict = BitConfig
    _snake_case : Dict = False
    def __UpperCAmelCase ( self ) -> Dict:
        UpperCAmelCase_ : str = BitModelTester(self )
| 145 |
def lowercase__ ( __snake_case : int ):
    """Return the ``__snake_case``-th ugly number — a number whose only prime
    factors are 2, 3 and 5 (sequence 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, …).

    Classic three-pointer merge: each pointer tracks the next candidate to be
    multiplied by 2, 3 and 5 respectively.
    """
    # NOTE(review): the original bound every value to a throwaway local and
    # then read undefined names (`ia`, `next_a`, `next_num`); proper bindings
    # are restored below.
    ugly_nums = [1]
    ia , ib , ic = 0, 0, 0
    next_a = ugly_nums[ia] * 2
    next_b = ugly_nums[ib] * 3
    next_c = ugly_nums[ic] * 5
    for _ in range(1 , __snake_case ):
        next_num = min(next_a , next_b , next_c )
        ugly_nums.append(next_num )
        # Advance every pointer that produced next_num so duplicates such as
        # 2*3 == 3*2 are emitted exactly once.
        if next_num == next_a:
            ia += 1
            next_a = ugly_nums[ia] * 2
        if next_num == next_b:
            ib += 1
            next_b = ugly_nums[ib] * 3
        if next_num == next_c:
            ic += 1
            next_c = ugly_nums[ic] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    # NOTE(review): the original printed `ugly_numbers(200)`, a name that does
    # not exist in this module; the function defined above is `lowercase__`.
    print(F'{lowercase__(200) = }')
| 145 | 1 |
import requests
from bsa import BeautifulSoup
def snake_case( base_url , params ) -> str:
    '''Return the "Cited by" text of a Google Scholar lookup result.

    ``base_url`` is the scholar lookup endpoint; ``params`` holds the query
    parameters identifying the publication.
    '''
    # NOTE(review): the original declared both parameters as `__magic_name__`
    # (a SyntaxError) and read undefined names `soup`/`div`; restored below.
    soup = BeautifulSoup(requests.get(base_url , params=params ).content , '''html.parser''' )
    # First result block, then its footer link row; the third anchor there is
    # the "Cited by N" link.
    div = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
    anchors = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
    return anchors[2].get_text()
if __name__ == "__main__":
    lowerCAmelCase_ = {
        'title': (
            'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
            'capacitance, volumetric capacitance, and energy density'
        ),
        'journal': 'Chem. Mater.',
        'volume': 30,
        'pages': '3979-3990',
        'year': 20_18,
        'hl': 'en',
    }
    # NOTE(review): the original called undefined names `get_citation` and
    # `params` (and had stray artifact text fused to the line); the function
    # defined in this module is `snake_case` and the dict is `lowerCAmelCase_`.
    print(snake_case('https://scholar.google.com/scholar_lookup', params=lowerCAmelCase_))
from heapq import heappop, heappush
import numpy as np
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) -> tuple[float | int, list[tuple[int, int]]]:
'''simple docstring'''
lowercase , lowercase : Optional[int] = grid.shape
lowercase : Optional[int] = [-1, 1, 0, 0]
lowercase : List[str] = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
lowercase , lowercase : Union[str, Any] = [(0, source)], set()
lowercase : List[str] = np.full((rows, cols) , np.inf )
lowercase : Dict = 0
lowercase : Dict = np.empty((rows, cols) , dtype=__magic_name__ )
lowercase : Any = None
while queue:
((lowercase) , (lowercase)) : Optional[Any] = heappop(__magic_name__ )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
lowercase : Tuple = []
while (x, y) != source:
path.append((x, y) )
lowercase , lowercase : Optional[int] = predecessors[x, y]
path.append(__magic_name__ ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(__magic_name__ ) ):
lowercase , lowercase : Optional[int] = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
lowercase : List[Any] = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(__magic_name__ , (dist + 1, (nx, ny)) )
lowercase : int = dist + 1
lowercase : Optional[Any] = (x, y)
return np.inf, []
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    # NOTE(review): stray dataset-artifact text ("| 308 | 1 |") fused to the
    # original last line has been removed.
    import doctest

    doctest.testmod()
def _A ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
a__ : Union[str, Any] =word.split()
def justify(SCREAMING_SNAKE_CASE : list , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> str:
a__ : Dict =max_width - width
a__ : Dict =len(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
a__ : List[str] =words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
a__ : Tuple =spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
a__ : int =(
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(SCREAMING_SNAKE_CASE ):
num_spaces_between_words_list[i] += 1
a__ : int =[]
for i in range(SCREAMING_SNAKE_CASE ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * " " )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(SCREAMING_SNAKE_CASE )
a__ : int =[]
a__ : list[str] =[]
a__ : List[str] =0
for word in words:
if width + len(SCREAMING_SNAKE_CASE ) + len(SCREAMING_SNAKE_CASE ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(SCREAMING_SNAKE_CASE )
width += len(SCREAMING_SNAKE_CASE )
else:
# justify the line and add it to result
answer.append(justify(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
# reset new line and new width
a__ , a__ : Optional[int] =[word], len(SCREAMING_SNAKE_CASE )
a__ : List[Any] =max_width - width - len(SCREAMING_SNAKE_CASE )
answer.append(" ".join(SCREAMING_SNAKE_CASE ) + (remaining_spaces + 1) * " " )
return answer
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    from doctest import testmod
    testmod()
| 148 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class __lowerCAmelCase ( TaskTemplate):
    """Task template mapping a dataset's columns onto the image-classification
    task schema: an ``image`` input column and a ``labels`` ClassLabel column.

    NOTE(review): the original declared five class attributes all named
    ``_lowercase`` (each shadowing the previous), decorated the dataclass with
    an undefined ``frozen=UpperCamelCase__`` and inherited from an undefined
    name; the attribute/method names below are restored from the ``self.*``
    reads in the body and the imported ``TaskTemplate`` base.
    """

    task: str = field(default="""image-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True})
    input_schema: ClassVar[Features] = Features({"""image""": Image()})
    label_schema: ClassVar[Features] = Features({"""labels""": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features( self , features ):
        '''Return a copy of this template whose label schema uses the dataset's
        own ``ClassLabel`` feature for ``self.label_column``.

        Raises:
            ValueError: if the label column is missing or not a ClassLabel.
        '''
        if self.label_column not in features:
            raise ValueError(F'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema['''labels'''] = features[self.label_column]
        # The dataclass is frozen, so bypass __setattr__ via __dict__.
        task_template.__dict__['''label_schema'''] = label_schema
        return task_template

    @property
    def column_mapping( self ) -> Dict[str, str]:
        '''Map the configured dataset columns to the canonical task columns.'''
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
| 148 | 1 |
"""simple docstring"""
import os
# Precomputes a list of the 100 first triangular numbers
__a = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)]
def A_ ( ):
    '''Project Euler 42: count the words in ``words.txt`` whose alphabetical
    value (A=1 … Z=26, letters summed) is a triangular number.
    '''
    # NOTE(review): the original read undefined names (`snake_case__`,
    # `TRIANGULAR_NUMBERS`, `words`, `f`); restored to `__file__`, the
    # module-level `__a` table and proper local bindings.
    script_directory = os.path.dirname(os.path.realpath(__file__ ) )
    words_file_path = os.path.join(script_directory, """words.txt""" )
    words = """"""
    with open(words_file_path ) as f:
        words = f.readline()
    # The file is a single line of comma-separated, double-quoted words.
    words = [word.strip("""\"""" ) for word in words.strip("""\r\n""" ).split(""",""" )]
    words = [
        word
        for word in [sum(ord(x ) - 64 for x in word ) for word in words]
        if word in __a
    ]
    return len(words )
if __name__ == "__main__":
    # NOTE(review): the original called an undefined `solution()`; the
    # function defined in this module is `A_`.
    print(A_())
| 66 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
# Config/inputs builder for the Flax DistilBERT tests (intended as
# FlaxDistilBertModelTester).
#
# NOTE(review): automated-rename damage — __init__ declares every parameter
# as `UpperCAmelCase_` (duplicate argument names are a SyntaxError), assigns
# to a throwaway local `_SCREAMING_SNAKE_CASE` while the methods read
# `self.batch_size`, `self.vocab_size`, …, and both methods share the name
# `UpperCamelCase` (intended: prepare_config_and_inputs and
# prepare_config_and_inputs_for_common).
class __UpperCAmelCase (unittest.TestCase ):
    def __init__( self: Optional[Any] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[Any]=13 , UpperCAmelCase_: List[str]=7 , UpperCAmelCase_: Tuple=True , UpperCAmelCase_: List[Any]=True , UpperCAmelCase_: List[str]=True , UpperCAmelCase_: Optional[Any]=True , UpperCAmelCase_: str=99 , UpperCAmelCase_: List[Any]=32 , UpperCAmelCase_: Dict=5 , UpperCAmelCase_: Tuple=4 , UpperCAmelCase_: Optional[Any]=37 , UpperCAmelCase_: Optional[int]="gelu" , UpperCAmelCase_: Optional[Any]=0.1 , UpperCAmelCase_: List[Any]=0.1 , UpperCAmelCase_: List[Any]=512 , UpperCAmelCase_: Any=16 , UpperCAmelCase_: Dict=2 , UpperCAmelCase_: Union[str, Any]=0.02 , UpperCAmelCase_: Union[str, Any]=4 , ):
        '''simple docstring'''
        _SCREAMING_SNAKE_CASE = parent
        _SCREAMING_SNAKE_CASE = batch_size
        _SCREAMING_SNAKE_CASE = seq_length
        _SCREAMING_SNAKE_CASE = is_training
        _SCREAMING_SNAKE_CASE = use_attention_mask
        _SCREAMING_SNAKE_CASE = use_token_type_ids
        _SCREAMING_SNAKE_CASE = use_labels
        _SCREAMING_SNAKE_CASE = vocab_size
        _SCREAMING_SNAKE_CASE = hidden_size
        _SCREAMING_SNAKE_CASE = num_hidden_layers
        _SCREAMING_SNAKE_CASE = num_attention_heads
        _SCREAMING_SNAKE_CASE = intermediate_size
        _SCREAMING_SNAKE_CASE = hidden_act
        _SCREAMING_SNAKE_CASE = hidden_dropout_prob
        _SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
        _SCREAMING_SNAKE_CASE = max_position_embeddings
        _SCREAMING_SNAKE_CASE = type_vocab_size
        _SCREAMING_SNAKE_CASE = type_sequence_label_size
        _SCREAMING_SNAKE_CASE = initializer_range
        _SCREAMING_SNAKE_CASE = num_choices
    # Intended as prepare_config_and_inputs: random ids, optional mask, config.
    def UpperCamelCase ( self: Tuple ):
        '''simple docstring'''
        _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _SCREAMING_SNAKE_CASE = None
        if self.use_attention_mask:
            _SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
        _SCREAMING_SNAKE_CASE = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=UpperCAmelCase_ , )
        return config, input_ids, attention_mask
    # Intended as prepare_config_and_inputs_for_common: config + inputs dict.
    def UpperCamelCase ( self: Tuple ):
        '''simple docstring'''
        _SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = config_and_inputs
        _SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
# Common Flax DistilBERT model tests via FlaxModelTesterMixin (the renamed
# `_UpperCAmelCase` base).
#
# NOTE(review): rename damage — this class shadows the tester class above
# (both named `__UpperCAmelCase`), both methods share the name `UpperCamelCase`
# (intended: setUp and test_model_from_pretrained, so neither a setUp nor a
# discoverable test survives), and `FlaxDistilBertModelTester` refers to a
# name that no longer exists under the renames.
@require_flax
class __UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
    __snake_case : Optional[int] = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    # Intended as setUp: build the model tester.
    def UpperCamelCase ( self: Dict ):
        '''simple docstring'''
        _SCREAMING_SNAKE_CASE = FlaxDistilBertModelTester(self )
    # Intended as test_model_from_pretrained: each class loads and runs.
    @slow
    def UpperCamelCase ( self: List[str] ):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            _SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("""distilbert-base-uncased""" )
            _SCREAMING_SNAKE_CASE = model(np.ones((1, 1) ) )
            self.assertIsNotNone(UpperCAmelCase_ )
# Integration test: run the pretrained Flax DistilBERT encoder on a fixed
# input and compare a slice of the output against recorded reference values.
#
# NOTE(review): rename damage — results are bound to a throwaway local
# `_SCREAMING_SNAKE_CASE` while the assertions read `output`, which is never
# assigned, and `model(...)`/`attention_mask=...` reference names lost in the
# rename. Restore the upstream bindings before relying on this test.
@require_flax
class __UpperCAmelCase (unittest.TestCase ):
    @slow
    def UpperCamelCase ( self: Union[str, Any] ):
        '''simple docstring'''
        _SCREAMING_SNAKE_CASE = FlaxDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
        _SCREAMING_SNAKE_CASE = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
        _SCREAMING_SNAKE_CASE = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        _SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )[0]
        _SCREAMING_SNAKE_CASE = (1, 11, 768)
        self.assertEqual(output.shape , UpperCAmelCase_ )
        _SCREAMING_SNAKE_CASE = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , UpperCAmelCase_ , atol=1E-4 ) )
| 306 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
# Configuration holder for the DeformableDetr image-processing tests
# (intended as DeformableDetrImageProcessingTester): stores the processing
# options and computes the resized height/width the processor should produce.
#
# NOTE(review): automated-rename damage — __init__ declares every parameter
# as `__a` (duplicate argument names are a SyntaxError) and assigns to a
# throwaway local `UpperCAmelCase__` while the methods read `self.size`,
# `self.do_resize`, …; restore the upstream parameter/attribute names before
# relying on this class.
class lowercase ( unittest.TestCase ):
    '''simple docstring'''

    def __init__(self , __a , __a=7 , __a=3 , __a=30 , __a=400 , __a=True , __a=None , __a=True , __a=[0.5, 0.5, 0.5] , __a=[0.5, 0.5, 0.5] , __a=True , __a=1 / 255 , __a=True , ) -> Optional[Any]:
        """simple docstring"""
        UpperCAmelCase__ = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        UpperCAmelCase__ = parent
        UpperCAmelCase__ = batch_size
        UpperCAmelCase__ = num_channels
        UpperCAmelCase__ = min_resolution
        UpperCAmelCase__ = max_resolution
        UpperCAmelCase__ = do_resize
        UpperCAmelCase__ = size
        UpperCAmelCase__ = do_normalize
        UpperCAmelCase__ = image_mean
        UpperCAmelCase__ = image_std
        UpperCAmelCase__ = do_rescale
        UpperCAmelCase__ = rescale_factor
        UpperCAmelCase__ = do_pad
    # Intended as prepare_image_processor_dict: the kwargs for the processor.
    def UpperCamelCase__ (self ) -> int:
        """simple docstring"""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    # Intended as get_expected_values: the post-resize (height, width) for one
    # image (shortest-edge resize preserving aspect ratio) or, when batched,
    # the max over the per-image expected values.
    def UpperCamelCase__ (self , __a , __a=False ) -> List[str]:
        """simple docstring"""
        if not batched:
            UpperCAmelCase__ = image_inputs[0]
            if isinstance(__a , Image.Image ):
                UpperCAmelCase__ , UpperCAmelCase__ = image.size
            else:
                UpperCAmelCase__ , UpperCAmelCase__ = image.shape[1], image.shape[2]
            if w < h:
                UpperCAmelCase__ = int(self.size['shortest_edge'] * h / w )
                UpperCAmelCase__ = self.size['shortest_edge']
            elif w > h:
                UpperCAmelCase__ = self.size['shortest_edge']
                UpperCAmelCase__ = int(self.size['shortest_edge'] * w / h )
            else:
                UpperCAmelCase__ = self.size['shortest_edge']
                UpperCAmelCase__ = self.size['shortest_edge']
        else:
            UpperCAmelCase__ = []
            for image in image_inputs:
                UpperCAmelCase__ , UpperCAmelCase__ = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            UpperCAmelCase__ = max(__a , key=lambda __a : item[0] )[0]
            UpperCAmelCase__ = max(__a , key=lambda __a : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class lowercase ( _UpperCamelCase , unittest.TestCase ):
    """Test suite for ``DeformableDetrImageProcessor``: config round-trips,
    PIL/numpy/torch input batching with aspect-ratio-preserving resize, and
    slow integration tests against COCO detection / panoptic annotations.

    NOTE(review): this block appears machine-mangled — every local is bound to
    the single name ``UpperCAmelCase__`` while later statements read names such
    as ``image_processing`` / ``encoded_images`` / ``image_inputs`` that are
    never assigned, and the base ``_UpperCamelCase`` is not defined in this
    file (presumably ``ImageProcessingSavingTestMixin`` from the import at the
    top) — confirm against the upstream source before running.
    """

    # Image-processor class under test (None when vision deps are missing).
    __SCREAMING_SNAKE_CASE = DeformableDetrImageProcessor if is_vision_available() else None

    def UpperCamelCase__ (self ) -> Dict:
        """setUp: build the shared tester that supplies kwargs and inputs."""
        UpperCAmelCase__ = DeformableDetrImageProcessingTester(self )

    @property
    def UpperCamelCase__ (self ) -> Optional[Any]:
        """Image-processor constructor kwargs supplied by the tester."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCamelCase__ (self ) -> int:
        """The processor exposes every expected configuration attribute."""
        UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__a , 'image_mean' ) )
        self.assertTrue(hasattr(__a , 'image_std' ) )
        self.assertTrue(hasattr(__a , 'do_normalize' ) )
        self.assertTrue(hasattr(__a , 'do_resize' ) )
        self.assertTrue(hasattr(__a , 'do_rescale' ) )
        self.assertTrue(hasattr(__a , 'do_pad' ) )
        self.assertTrue(hasattr(__a , 'size' ) )

    def UpperCamelCase__ (self ) -> int:
        """``from_dict`` honors defaults and explicit size/pad overrides."""
        UpperCAmelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
        self.assertEqual(image_processor.do_pad , __a )
        UpperCAmelCase__ = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__a )
        self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
        self.assertEqual(image_processor.do_pad , __a )

    def UpperCamelCase__ (self ) -> Any:
        """Intentionally skipped upstream (placeholder override)."""
        pass

    def UpperCamelCase__ (self ) -> Tuple:
        """PIL inputs: unbatched and batched encodings have expected shapes."""
        UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a )
        for image in image_inputs:
            self.assertIsInstance(__a , Image.Image )
        # Test not batched input
        UpperCAmelCase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor_tester.get_expected_values(__a )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor_tester.get_expected_values(__a , batched=__a )
        UpperCAmelCase__ = image_processing(__a , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def UpperCamelCase__ (self ) -> Optional[int]:
        """NumPy inputs: unbatched and batched encodings have expected shapes."""
        UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        UpperCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a )
        for image in image_inputs:
            self.assertIsInstance(__a , np.ndarray )
        # Test not batched input
        UpperCAmelCase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor_tester.get_expected_values(__a )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        UpperCAmelCase__ = image_processing(__a , return_tensors='pt' ).pixel_values
        UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor_tester.get_expected_values(__a , batched=__a )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def UpperCamelCase__ (self ) -> List[str]:
        """PyTorch tensor inputs: unbatched and batched encodings have expected shapes."""
        UpperCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        UpperCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a )
        for image in image_inputs:
            self.assertIsInstance(__a , torch.Tensor )
        # Test not batched input
        UpperCAmelCase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor_tester.get_expected_values(__a )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        UpperCAmelCase__ = image_processing(__a , return_tensors='pt' ).pixel_values
        UpperCAmelCase__ , UpperCAmelCase__ = self.image_processor_tester.get_expected_values(__a , batched=__a )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    @slow
    def UpperCamelCase__ (self ) -> Any:
        """Integration: COCO detection annotations are encoded to reference values."""
        # prepare image and target
        UpperCAmelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
            UpperCAmelCase__ = json.loads(f.read() )
        UpperCAmelCase__ = {'image_id': 39769, 'annotations': target}
        # encode them
        UpperCAmelCase__ = DeformableDetrImageProcessor()
        UpperCAmelCase__ = image_processing(images=__a , annotations=__a , return_tensors='pt' )
        # verify pixel values
        UpperCAmelCase__ = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding['pixel_values'].shape , __a )
        UpperCAmelCase__ = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __a , atol=1E-4 ) )
        # verify area
        UpperCAmelCase__ = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __a ) )
        # verify boxes
        UpperCAmelCase__ = torch.Size([6, 4] )
        self.assertEqual(encoding['labels'][0]['boxes'].shape , __a )
        UpperCAmelCase__ = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __a , atol=1E-3 ) )
        # verify image_id
        UpperCAmelCase__ = torch.tensor([39769] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __a ) )
        # verify is_crowd
        UpperCAmelCase__ = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __a ) )
        # verify class_labels
        UpperCAmelCase__ = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __a ) )
        # verify orig_size
        UpperCAmelCase__ = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __a ) )
        # verify size
        UpperCAmelCase__ = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __a ) )

    @slow
    def UpperCamelCase__ (self ) -> str:
        """Integration: COCO panoptic annotations (with masks) encode to reference values."""
        # prepare image, target and masks_path
        UpperCAmelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
            UpperCAmelCase__ = json.loads(f.read() )
        UpperCAmelCase__ = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
        UpperCAmelCase__ = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
        # encode them
        UpperCAmelCase__ = DeformableDetrImageProcessor(format='coco_panoptic' )
        UpperCAmelCase__ = image_processing(images=__a , annotations=__a , masks_path=__a , return_tensors='pt' )
        # verify pixel values
        UpperCAmelCase__ = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding['pixel_values'].shape , __a )
        UpperCAmelCase__ = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __a , atol=1E-4 ) )
        # verify area
        UpperCAmelCase__ = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __a ) )
        # verify boxes
        UpperCAmelCase__ = torch.Size([6, 4] )
        self.assertEqual(encoding['labels'][0]['boxes'].shape , __a )
        UpperCAmelCase__ = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __a , atol=1E-3 ) )
        # verify image_id
        UpperCAmelCase__ = torch.tensor([39769] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __a ) )
        # verify is_crowd
        UpperCAmelCase__ = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __a ) )
        # verify class_labels
        UpperCAmelCase__ = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __a ) )
        # verify masks
        UpperCAmelCase__ = 822873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item() , __a )
        # verify orig_size
        UpperCAmelCase__ = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __a ) )
        # verify size
        UpperCAmelCase__ = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __a ) )
| 335 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowercase :
    """Helper that builds ConvBERT configs/inputs and runs per-head shape
    checks for the TFConvBert test suite below.

    NOTE(review): this block appears machine-mangled — ``__init__`` repeats the
    parameter name ``__a`` (a SyntaxError in Python) and binds every value to
    the single local ``UpperCAmelCase__`` rather than to attributes such as
    ``self.batch_size`` that the other methods read; the per-head methods also
    read names (``result``, ``input_ids``, …) that are never assigned. Restore
    the real names from the upstream file before running.
    """

    def __init__(self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=2 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , ) -> Tuple:
        """Record the (hard-coded) model dimensions used by the checks below."""
        UpperCAmelCase__ = parent
        UpperCAmelCase__ = 13
        UpperCAmelCase__ = 7
        UpperCAmelCase__ = True
        UpperCAmelCase__ = True
        UpperCAmelCase__ = True
        UpperCAmelCase__ = True
        UpperCAmelCase__ = 99
        UpperCAmelCase__ = 384
        UpperCAmelCase__ = 2
        UpperCAmelCase__ = 4
        UpperCAmelCase__ = 37
        UpperCAmelCase__ = 'gelu'
        UpperCAmelCase__ = 0.1
        UpperCAmelCase__ = 0.1
        UpperCAmelCase__ = 512
        UpperCAmelCase__ = 16
        UpperCAmelCase__ = 2
        UpperCAmelCase__ = 0.02
        UpperCAmelCase__ = 3
        UpperCAmelCase__ = 4
        UpperCAmelCase__ = 128
        UpperCAmelCase__ = 2
        UpperCAmelCase__ = 9
        UpperCAmelCase__ = 1
        UpperCAmelCase__ = None

    def UpperCamelCase__ (self ) -> List[str]:
        """Build a ConvBertConfig plus random ids/masks/labels for one batch."""
        UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCAmelCase__ = None
        if self.use_input_mask:
            UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCAmelCase__ = None
        if self.use_token_type_ids:
            UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        UpperCAmelCase__ = None
        UpperCAmelCase__ = None
        UpperCAmelCase__ = None
        if self.use_labels:
            UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            UpperCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
        UpperCAmelCase__ = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__a , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Tuple:
        """Base model: dict and list inputs both yield the full hidden-state shape."""
        UpperCAmelCase__ = TFConvBertModel(config=__a )
        UpperCAmelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        UpperCAmelCase__ = [input_ids, input_mask]
        UpperCAmelCase__ = model(__a )
        UpperCAmelCase__ = model(__a )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Any:
        """Masked-LM head: logits are (batch, seq, vocab)."""
        UpperCAmelCase__ = TFConvBertForMaskedLM(config=__a )
        UpperCAmelCase__ = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        UpperCAmelCase__ = model(__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Optional[Any]:
        """Sequence-classification head: logits are (batch, num_labels)."""
        UpperCAmelCase__ = self.num_labels
        UpperCAmelCase__ = TFConvBertForSequenceClassification(config=__a )
        UpperCAmelCase__ = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        UpperCAmelCase__ = model(__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Optional[int]:
        """Multiple-choice head: inputs are tiled per choice; logits are (batch, num_choices)."""
        UpperCAmelCase__ = self.num_choices
        UpperCAmelCase__ = TFConvBertForMultipleChoice(config=__a )
        UpperCAmelCase__ = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
        UpperCAmelCase__ = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
        UpperCAmelCase__ = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
        UpperCAmelCase__ = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        UpperCAmelCase__ = model(__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> Union[str, Any]:
        """Token-classification head: logits are (batch, seq, num_labels)."""
        UpperCAmelCase__ = self.num_labels
        UpperCAmelCase__ = TFConvBertForTokenClassification(config=__a )
        UpperCAmelCase__ = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        UpperCAmelCase__ = model(__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def UpperCamelCase__ (self , __a , __a , __a , __a , __a , __a , __a ) -> List[Any]:
        """Question-answering head: start/end logits are (batch, seq)."""
        UpperCAmelCase__ = TFConvBertForQuestionAnswering(config=__a )
        UpperCAmelCase__ = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        UpperCAmelCase__ = model(__a )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def UpperCamelCase__ (self ) -> Optional[Any]:
        """Split prepare_config_and_inputs() into (config, inputs_dict) for common tests."""
        UpperCAmelCase__ = self.prepare_config_and_inputs()
        (
            (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) ,
        ) = config_and_inputs
        UpperCAmelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
    """TFConvBert model test suite: per-head shape checks, saved-model
    hidden-states/attentions round-trip, and attention-output toggles.

    NOTE(review): this block appears machine-mangled — locals are all bound to
    ``UpperCAmelCase__`` while later statements read unassigned names, and the
    two base classes ``_UpperCamelCase`` are undefined in this file (presumably
    ``TFModelTesterMixin`` and ``PipelineTesterMixin`` from the imports above).
    Confirm against the upstream source before running.
    """

    # NOTE(review): the three class attributes below all share one mangled
    # name, so only the last assignment survives; upstream these are
    # all_model_classes, pipeline_model_mapping, and three boolean flags.
    __SCREAMING_SNAKE_CASE = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    __SCREAMING_SNAKE_CASE = (
        {
            """feature-extraction""": TFConvBertModel,
            """fill-mask""": TFConvBertForMaskedLM,
            """question-answering""": TFConvBertForQuestionAnswering,
            """text-classification""": TFConvBertForSequenceClassification,
            """token-classification""": TFConvBertForTokenClassification,
            """zero-shot""": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    __SCREAMING_SNAKE_CASE = False
    __SCREAMING_SNAKE_CASE = False
    __SCREAMING_SNAKE_CASE = False

    def UpperCamelCase__ (self ) -> Optional[Any]:
        """setUp: create the model tester and the config tester."""
        UpperCAmelCase__ = TFConvBertModelTester(self )
        UpperCAmelCase__ = ConfigTester(self , config_class=__a , hidden_size=37 )

    def UpperCamelCase__ (self ) -> List[Any]:
        """Run the shared ConvBertConfig sanity checks."""
        self.config_tester.run_common_tests()

    def UpperCamelCase__ (self ) -> str:
        """Shape-check the base model."""
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__a )

    def UpperCamelCase__ (self ) -> List[Any]:
        """Shape-check the masked-LM head."""
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*__a )

    def UpperCamelCase__ (self ) -> Union[str, Any]:
        """Shape-check the multiple-choice head."""
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*__a )

    def UpperCamelCase__ (self ) -> Any:
        """Shape-check the question-answering head."""
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*__a )

    def UpperCamelCase__ (self ) -> Union[str, Any]:
        """Shape-check the sequence-classification head."""
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*__a )

    def UpperCamelCase__ (self ) -> List[Any]:
        """Shape-check the token-classification head."""
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*__a )

    @slow
    def UpperCamelCase__ (self ) -> Union[str, Any]:
        """Hidden states / attentions survive a SavedModel save + reload."""
        UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase__ = True
        UpperCAmelCase__ = True
        if hasattr(__a , 'use_cache' ):
            UpperCAmelCase__ = True
        UpperCAmelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
        UpperCAmelCase__ = getattr(self.model_tester , 'key_length' , __a )
        for model_class in self.all_model_classes:
            UpperCAmelCase__ = self._prepare_for_class(__a , __a )
            UpperCAmelCase__ = model_class(__a )
            UpperCAmelCase__ = len(model(__a ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(__a , saved_model=__a )
                UpperCAmelCase__ = os.path.join(__a , 'saved_model' , '1' )
                UpperCAmelCase__ = tf.keras.models.load_model(__a )
                UpperCAmelCase__ = model(__a )
                if self.is_encoder_decoder:
                    UpperCAmelCase__ = outputs['encoder_hidden_states']
                    UpperCAmelCase__ = outputs['encoder_attentions']
                else:
                    UpperCAmelCase__ = outputs['hidden_states']
                    UpperCAmelCase__ = outputs['attentions']
                self.assertEqual(len(__a ) , __a )
                UpperCAmelCase__ = getattr(
                    self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(__a ) , __a )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

    @slow
    def UpperCamelCase__ (self ) -> Any:
        """The pretrained checkpoint loads."""
        UpperCAmelCase__ = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
        self.assertIsNotNone(__a )

    def UpperCamelCase__ (self ) -> List[str]:
        """output_attentions can be enabled per-call and via the config."""
        UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase__ = True
        UpperCAmelCase__ = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
        UpperCAmelCase__ = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
        UpperCAmelCase__ = getattr(self.model_tester , 'key_length' , __a )
        UpperCAmelCase__ = getattr(self.model_tester , 'key_length' , __a )

        def check_decoder_attentions_output(__a ):
            # Decoder attentions: one per layer, heads halved by ConvBERT's grouping.
            UpperCAmelCase__ = len(__a )
            self.assertEqual(out_len % 2 , 0 )
            UpperCAmelCase__ = outputs.decoder_attentions
            self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )

        def check_encoder_attentions_output(__a ):
            # Encoder attentions: one per layer, heads halved by ConvBERT's grouping.
            UpperCAmelCase__ = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

        for model_class in self.all_model_classes:
            UpperCAmelCase__ = True
            UpperCAmelCase__ = False
            UpperCAmelCase__ = model_class(__a )
            UpperCAmelCase__ = model(self._prepare_for_class(__a , __a ) )
            UpperCAmelCase__ = len(__a )
            self.assertEqual(config.output_hidden_states , __a )
            check_encoder_attentions_output(__a )
            if self.is_encoder_decoder:
                UpperCAmelCase__ = model_class(__a )
                UpperCAmelCase__ = model(self._prepare_for_class(__a , __a ) )
                self.assertEqual(config.output_hidden_states , __a )
                check_decoder_attentions_output(__a )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            UpperCAmelCase__ = True
            UpperCAmelCase__ = model_class(__a )
            UpperCAmelCase__ = model(self._prepare_for_class(__a , __a ) )
            self.assertEqual(config.output_hidden_states , __a )
            check_encoder_attentions_output(__a )
            # Check attention is always last and order is fine
            UpperCAmelCase__ = True
            UpperCAmelCase__ = True
            UpperCAmelCase__ = model_class(__a )
            UpperCAmelCase__ = model(self._prepare_for_class(__a , __a ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) )
            self.assertEqual(model.config.output_hidden_states , __a )
            check_encoder_attentions_output(__a )
@require_tf
class lowercase ( unittest.TestCase ):
    """Slow integration test for TFConvBert.

    Runs ``YituTech/conv-bert-base`` on a fixed input and compares a slice of
    the output hidden states against reference values.

    Fix: the original bound every local to one throwaway name and then read
    undefined names (``output``, ``__a``) — coherent locals restored.
    """

    @slow
    def UpperCamelCase__ (self ) -> int:
        """Check the pretrained checkpoint reproduces known hidden states."""
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        # Forward pass; [0] selects the last hidden state from the model output.
        output = model(input_ids )[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape )
        # Reference slice of the hidden states at positions :3, dims :3.
        expected_slice = tf.constant(
            [
                [
                    [-0.03_47_54_93, -0.4_68_60_34, -0.30_63_88_32],
                    [0.22_63_72_48, -0.26_98_86_46, -0.7_42_34_24],
                    [0.10_32_48_68, -0.45_01_35_08, -0.58_28_07_84],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
| 335 | 1 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowercase : int = logging.get_logger(__name__)
def rename_key(key):
    """Rewrite PyTorch-style ``name.N`` segments as Flax-style ``name_N``.

    E.g. ``"layers.0.weight"`` -> ``"layers_0.weight"``.

    Fixes over the original: the replacement result was assigned to a
    throwaway local instead of back to ``key`` (so the input was returned
    unchanged), and the function is renamed from the triplicated ``A_`` to
    the name the conversion routine below actually calls.
    """
    regex = R'\w+[.]\d+'
    pats = re.findall(regex , key )
    for pat in pats:
        # Write the replacement back so successive patterns accumulate.
        key = key.replace(pat , '_'.join(pat.split('.' ) ) )
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename a PyTorch weight key (as a tuple) to its Flax counterpart and
    transpose the tensor where the parameter layouts differ.

    Handles: layer-norm scale/bias, embeddings, conv kernels (OIHW -> HWIO),
    linear kernels (transposed), and legacy ``gamma``/``beta`` names.
    Falls through to the key unchanged when no rule matches.

    Fixes over the original: the duplicated parameter name was a SyntaxError
    and every branch read undefined locals; renamed from the triplicated
    ``A_`` to the name the conversion routine below actually calls.
    """
    # conv/layer norm: a "bias" that Flax stores as "scale"
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: PyTorch (out, in, kh, kw) -> Flax (kh, kw, in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: Flax kernels are the transpose of PyTorch weights
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    """Convert a PyTorch state dict into a nested (unflattened) Flax params dict.

    Keys are renamed via ``rename_key`` / ``rename_key_and_reshape_tensor``,
    tensors transposed where layouts differ, and shapes validated against a
    randomly initialized Flax model. Raises ValueError on a shape mismatch for
    a key the Flax model knows about; unknown keys are kept so the caller can
    warn about unexpected weights.

    Fixes over the original: the duplicated parameter name was a SyntaxError
    and the loop body read undefined locals; renamed from the triplicated
    ``A_`` to a descriptive name.
    """
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split('.'))
        # Correctly rename weight parameters (and transpose where needed)
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
                    F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.')
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
| 99 |
from collections.abc import Iterable
from typing import Generic, TypeVar
# Type variable for the queue's element type. The Python name must be `_T`
# so the `Generic[_T]` subscription below resolves; the original bound the
# TypeVar to an unrelated name (with an unimported `Any` annotation),
# leaving `_T` undefined at class-creation time.
_T = TypeVar("_T")


class A__ ( Generic[_T] ):
    """FIFO queue implemented with two LIFO stacks.

    ``put`` pushes onto ``_stack1``; ``get`` pops from ``_stack2``, refilling
    it by draining ``_stack1`` (which reverses order) only when it is empty.
    This gives amortized O(1) enqueue and dequeue.

    Fixes over the mangled original: ``__init__`` assigned to a throwaway
    local instead of the two stacks, and both mutators shared the single
    name ``__lowercase`` so the enqueue method was unreachable — restored
    as ``put`` / ``get``.

    >>> q = A__([1, 2, 3])
    >>> q.put(4)
    >>> q.get()
    1
    >>> len(q)
    3
    """

    def __init__( self , lowercase = None) -> None:
        # Items are enqueued onto _stack1; _stack2 holds items in dequeue order.
        self._stack1: list[_T] = list(lowercase or [])
        self._stack2: list[_T] = []

    def __len__( self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__( self) -> str:
        # Front of the queue first: reversed _stack2 followed by _stack1.
        return F'Queue({tuple(self._stack2[::-1] + self._stack1)})'

    def put( self , item) -> None:
        """Enqueue *item* at the back of the queue."""
        self._stack1.append(item)

    def get( self) -> _T:
        """Dequeue and return the front item; raise IndexError when empty."""
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            # Drain the inbox stack so the oldest item ends up on top.
            while self._stack1:
                stack2_append(stack1_pop())
        if not self._stack2:
            raise IndexError('Queue is empty')
        return self._stack2.pop()
if __name__ == "__main__":
    # Run the doctest examples embedded in this module when executed directly.
    from doctest import testmod

    testmod()
| 99 | 1 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class a :
    """Helper that builds Mask2Former configs/inputs and runs forward-pass
    shape checks for the base model and the universal-segmentation head.

    NOTE(review): this block appears machine-mangled — ``__init__`` repeats
    the parameter name ``snake_case`` (a SyntaxError in Python) and binds
    every value to the single local ``__UpperCAmelCase`` instead of the
    ``self.batch_size`` etc. attributes the other methods read. Restore the
    real names from the upstream file before running.
    """

    def __init__( self : str , snake_case : Union[str, Any] , snake_case : Union[str, Any]=2 , snake_case : Union[str, Any]=True , snake_case : List[Any]=False , snake_case : Tuple=10 , snake_case : Any=3 , snake_case : int=32 * 8 , snake_case : str=32 * 8 , snake_case : Dict=4 , snake_case : Union[str, Any]=64 , ) -> Tuple:
        """Record the batch/image/query dimensions used by the checks below."""
        __UpperCAmelCase : List[Any] = parent
        __UpperCAmelCase : Union[str, Any] = batch_size
        __UpperCAmelCase : Dict = is_training
        __UpperCAmelCase : Optional[int] = use_auxiliary_loss
        __UpperCAmelCase : List[str] = num_queries
        __UpperCAmelCase : Dict = num_channels
        __UpperCAmelCase : Union[str, Any] = min_size
        __UpperCAmelCase : List[Any] = max_size
        __UpperCAmelCase : Tuple = num_labels
        __UpperCAmelCase : str = hidden_dim
        __UpperCAmelCase : List[str] = hidden_dim

    def lowerCamelCase__ ( self : Optional[int] ) -> List[str]:
        """Random pixel values/mask plus binary mask labels and class labels."""
        __UpperCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            snake_case )
        __UpperCAmelCase : Tuple = torch.ones([self.batch_size, self.min_size, self.max_size] , device=snake_case )
        __UpperCAmelCase : Dict = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=snake_case ) > 0.5
        ).float()
        __UpperCAmelCase : Dict = (torch.rand((self.batch_size, self.num_labels) , device=snake_case ) > 0.5).long()
        __UpperCAmelCase : Optional[int] = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def lowerCamelCase__ ( self : int ) -> int:
        """Build a small Mask2FormerConfig sized to this tester's dimensions."""
        __UpperCAmelCase : Optional[int] = MaskaFormerConfig(
            hidden_size=self.hidden_dim , )
        __UpperCAmelCase : Optional[Any] = self.num_queries
        __UpperCAmelCase : List[str] = self.num_labels
        __UpperCAmelCase : int = [1, 1, 1, 1]
        __UpperCAmelCase : Dict = self.num_channels
        __UpperCAmelCase : Tuple = 64
        __UpperCAmelCase : int = 128
        __UpperCAmelCase : Optional[Any] = self.hidden_dim
        __UpperCAmelCase : Optional[Any] = self.hidden_dim
        __UpperCAmelCase : int = self.hidden_dim
        return config

    def lowerCamelCase__ ( self : List[Any] ) -> List[str]:
        """Split prepare_config_and_inputs() into (config, inputs_dict)."""
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = self.prepare_config_and_inputs()
        __UpperCAmelCase : Union[str, Any] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
        return config, inputs_dict

    def lowerCamelCase__ ( self : Optional[int] , snake_case : Any , snake_case : Optional[Any] ) -> Tuple:
        """Hidden-state lists match backbone depths and decoder layer count."""
        __UpperCAmelCase : Union[str, Any] = output.encoder_hidden_states
        __UpperCAmelCase : Any = output.pixel_decoder_hidden_states
        __UpperCAmelCase : Union[str, Any] = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(snake_case ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(snake_case ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(snake_case ) , config.decoder_layers )

    def lowerCamelCase__ ( self : Optional[Any] , snake_case : Dict , snake_case : str , snake_case : Dict , snake_case : Any=False ) -> Optional[int]:
        """Base model forward pass yields the expected decoder output shape."""
        with torch.no_grad():
            __UpperCAmelCase : List[Any] = MaskaFormerModel(config=snake_case )
            model.to(snake_case )
            model.eval()
            __UpperCAmelCase : Optional[Any] = model(pixel_values=snake_case , pixel_mask=snake_case )
            __UpperCAmelCase : Optional[Any] = model(snake_case , output_hidden_states=snake_case )
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(snake_case , snake_case )

    def lowerCamelCase__ ( self : Dict , snake_case : List[Any] , snake_case : Optional[Any] , snake_case : Any , snake_case : Optional[Any] , snake_case : Union[str, Any] ) -> Union[str, Any]:
        """Segmentation head: logits shapes are right and loss is a scalar."""
        __UpperCAmelCase : int = MaskaFormerForUniversalSegmentation(config=snake_case )
        model.to(snake_case )
        model.eval()

        def comm_check_on_output(snake_case : List[Any] ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            __UpperCAmelCase : int = model(pixel_values=snake_case , pixel_mask=snake_case )
            __UpperCAmelCase : Any = model(snake_case )
        comm_check_on_output(snake_case )
        __UpperCAmelCase : List[str] = model(
            pixel_values=snake_case , pixel_mask=snake_case , mask_labels=snake_case , class_labels=snake_case )
        comm_check_on_output(snake_case )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for Mask2Former.

    NOTE(review): the obfuscated source listed the two mixin bases as the same
    undefined name `_a` (a TypeError: duplicate bases); restored here to the
    conventional ModelTesterMixin/PipelineTesterMixin pair — confirm against the
    file's import header, which is outside this chunk.
    """

    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        # has_text_modality=False: Mask2Former is vision-only, so the config
        # tester must not probe text attributes such as vocab_size.
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskaformer_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs, output_hidden_states=False)

    def test_maskaformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = MaskaFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()
        model = MaskaFormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        # index 1 is the segmentation-head model, the only one that returns a loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config)
        model.to(torch_device)
        model.train()
        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config).to(torch_device)
        model.train()
        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()
        # retain_graph so all four retained tensors keep their grads
        outputs.loss.backward(retain_graph=True)
        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
# Absolute tolerance used when comparing model outputs against reference slices
# in the integration tests below.
TOLERANCE: float = 1e-4
def prepare_img():
    """Load the COCO fixture image used by the integration tests below.

    Restored name: the call sites in this file already invoke `prepare_img()`.
    NOTE(review): `Image` (PIL) must be imported in the off-screen header.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against the pretrained Mask2Former checkpoint.

    All expected numeric slices are copied verbatim from the original source.
    NOTE(review): `MaskaFormerImageProcessor`, `np`, `torch_device` come from
    the off-screen import header — confirm names.
    """

    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))
        with torch.no_grad():
            outputs = model(**inputs)
        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=1e-4)
        )
        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=1e-4
            )
        )
        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=1e-4
            )
        )

    def test_inference_universal_segmentation_head(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))
        with torch.no_grad():
            outputs = model(**inputs)
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=1e-4))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=1e-4))

    def test_with_segmentation_maps_and_loss(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        # np.float32 restored: the obfuscated source had the non-existent
        # attribute `np.floataa` (digits mangled to 'a').
        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )
        pixel_values = inputs["pixel_values"].to(torch_device)
        mask_labels = [el.to(torch_device) for el in inputs["mask_labels"]]
        class_labels = [el.to(torch_device) for el in inputs["class_labels"]]
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, mask_labels=mask_labels, class_labels=class_labels)
        self.assertTrue(outputs.loss is not None)
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    """Abstract base class for constraints applied during constrained generation.

    A subclass must implement `advance`, `does_advance`, `update`, `reset`,
    `remaining` and `copy`, and set `self.seqlen` before calling
    `super().__init__()`, which immediately self-tests the implementation.
    """

    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        """Drive the constraint to completion once to verify it is well-defined.

        Raises:
            Exception: if `does_advance`/`update` are inconsistent, if the
                constraint never completes within 10000 steps, or if
                `remaining()` is non-zero after completion.
        """
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                # exercise reset() once early to check it restores a valid state
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )
            stepped, completed, reset = self.update(advance)
            counter += 1
            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")
        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        """Return the token(s) that would advance this constraint, or None."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        """Return whether `token_id` makes progress on this constraint."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        """Consume `token_id`; return (stepped, completed, reset)."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        """Discard all progress made on this constraint."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        """Return the number of steps still needed to complete the constraint."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        """Return a copy; if `stateful`, also copy the current progress."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class PhrasalConstraint(Constraint):
    """Constraint forcing an exact token sequence (a phrase) to appear.

    Args:
        token_ids: non-empty list of non-negative token ids forming the phrase.
    """

    def __init__(self, token_ids):
        # deliberately skip Constraint.__init__ (its self-test) — matches the
        # original `super(<base>, self).__init__()` call pattern
        super(Constraint, self).__init__()
        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
class DisjunctiveTrie:
    """Prefix trie over several candidate token sequences.

    Args:
        nested_token_ids: list of token-id lists, one per candidate sequence.
        no_subsets: if True, raise ValueError when one candidate is a complete
            prefix (subset) of another, which would make matching ambiguous.
    """

    def __init__(self, nested_token_ids, no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )
        self.trie = root

    def next_tokens(self, current_seq):
        """Return the tokens that can legally follow the prefix `current_seq`."""
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        """Return True when `current_seq` is a complete candidate sequence."""
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        """Recursively count leaf nodes beneath `root`."""
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        """True iff some candidate is a strict prefix of another (fewer leaves
        than candidate sequences)."""
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class DisjunctiveConstraint(Constraint):
    """Constraint satisfied by completing ANY ONE of several token sequences.

    Args:
        nested_token_ids: non-empty list of non-empty lists of non-negative ids.
    """

    def __init__(self, nested_token_ids):
        # deliberately skip Constraint.__init__ (its self-test)
        super(Constraint, self).__init__()
        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )
        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
class ConstraintListState:
    """Tracks progress of a beam through a list of `Constraint` objects.

    At most one constraint is "in progress" at a time; the rest are either
    completed or pending. Constraints are duck-typed: anything exposing
    `seqlen`, `advance`, `does_advance`, `update`, `reset`, `remaining`, `copy`.
    """

    def __init__(self, constraints):
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False
        self.init_state()

    def init_state(self):
        """Reset to the no-progress state with fresh (stateless) copies."""
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        """Score this state: completed constraints weigh `max_seqlen` each,
        plus partial credit for the in-progress one."""
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        """Return the list of token ids that would make progress, or None."""
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids):
        """Re-derive the whole state by replaying `token_ids` from scratch."""
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)
                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id):
        """Feed one token; return (complete, stepped) for the affected constraint."""
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")
        complete, stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
            # job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #    inprogress to None. If there are no pending constraints either, then this full list of constraints
                #    is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None
                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)
                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )
                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                    break  # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we actually never though self.constraints objects
        # throughout this process. So it's at initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
"""simple docstring"""
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    """ConfigTester variant that also checks Levit-specific config attributes."""

    def create_and_test_config_common_properties(self):
        # The obfuscated original read an undefined name here; restore the
        # conventional pattern: build the config and probe its attributes.
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class LevitModelTester:
    """Builds small Levit configs/inputs and runs shape checks for the tests.

    NOTE(review): relies on LevitConfig/LevitModel/LevitForImageClassification
    and the floats_tensor/ids_tensor helpers imported at the top of this file.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        # list defaults kept for interface compatibility; they are never mutated
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        # four strided convs in the patch embedding shrink the spatial dims
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
    """Common-suite tests for Levit (base model, classification, teacher variant).

    NOTE(review): identifiers in this block are machine-mangled — every test
    method is named `lowerCAmelCase_` (later defs shadow earlier ones), several
    defs declare duplicate parameter names (a SyntaxError in Python 3), and most
    locals are bound to `_UpperCAmelCase` but read under other names.  The
    comments below describe the evident intent of each method; confirm against
    the upstream transformers test file before relying on them.
    """

    # All Levit model classes exercised by the common tests (empty without torch).
    _snake_case : Dict = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    # Pipeline-task mapping consumed by the pipeline tester mixin.
    _snake_case : Tuple = (
        {
            'feature-extraction': LevitModel,
            'image-classification': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )
    # Feature flags for the common mixin (attention outputs, resizing, head
    # masking, etc. are all disabled for Levit).
    _snake_case : Union[str, Any] = False
    _snake_case : List[Any] = False
    _snake_case : Any = False
    _snake_case : List[str] = False
    _snake_case : Tuple = False

    # setUp: build the model tester and a ConfigTester for the Levit config.
    def lowerCAmelCase_ ( self : List[str] ):
        _UpperCAmelCase = LevitModelTester(self )
        _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37 )

    # Run the standard configuration round-trip checks.
    def lowerCAmelCase_ ( self : Tuple ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    # create_and_test_config_common_properties: intentionally a no-op for Levit.
    def lowerCAmelCase_ ( self : int ):
        return

    @unittest.skip(reason="""Levit does not use inputs_embeds""" )
    def lowerCAmelCase_ ( self : int ):
        pass

    @unittest.skip(reason="""Levit does not support input and output embeddings""" )
    def lowerCAmelCase_ ( self : Union[str, Any] ):
        pass

    @unittest.skip(reason="""Levit does not output attentions""" )
    def lowerCAmelCase_ ( self : Optional[Any] ):
        pass

    # Check that every model's forward signature starts with `pixel_values`.
    def lowerCAmelCase_ ( self : List[Any] ):
        _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _UpperCAmelCase = model_class(__lowerCAmelCase )
            _UpperCAmelCase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _UpperCAmelCase = [*signature.parameters.keys()]
            _UpperCAmelCase = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , __lowerCAmelCase )

    # Verify hidden-state count and the spatial size of the first hidden state.
    def lowerCAmelCase_ ( self : Dict ):
        def check_hidden_states_output(__lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple ):
            _UpperCAmelCase = model_class(__lowerCAmelCase )
            model.to(__lowerCAmelCase )
            model.eval()
            with torch.no_grad():
                _UpperCAmelCase = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
            _UpperCAmelCase = outputs.hidden_states
            _UpperCAmelCase = len(self.model_tester.depths ) + 1
            self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase )
            _UpperCAmelCase = (self.model_tester.image_size, self.model_tester.image_size)
            _UpperCAmelCase , _UpperCAmelCase = image_size[0], image_size[1]
            # Apply the conv-stem output-size formula once per stem layer (4 of them).
            for _ in range(4 ):
                _UpperCAmelCase = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
                _UpperCAmelCase = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ] , )

        _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _UpperCAmelCase = True
            check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _UpperCAmelCase = True
            check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def lowerCAmelCase_ ( self : Optional[Any] ):
        pass

    # _prepare_for_class override: drop labels for the inference-only teacher model.
    def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : List[str]=False ):
        _UpperCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict

    # Smoke-test the base model.
    def lowerCAmelCase_ ( self : Tuple ):
        _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowerCAmelCase )

    # Smoke-test the image-classification head.
    def lowerCAmelCase_ ( self : str ):
        _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )

    # Training: every trainable model must produce a loss that backpropagates.
    def lowerCAmelCase_ ( self : Optional[int] ):
        if not self.model_tester.is_training:
            return
        _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        _UpperCAmelCase = True
        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(__lowerCAmelCase )
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            _UpperCAmelCase = model_class(__lowerCAmelCase )
            model.to(__lowerCAmelCase )
            model.train()
            _UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
            _UpperCAmelCase = model(**__lowerCAmelCase ).loss
            loss.backward()

    # Same as above, but with gradient checkpointing enabled.
    def lowerCAmelCase_ ( self : int ):
        _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        _UpperCAmelCase = False
        _UpperCAmelCase = True
        for model_class in self.all_model_classes:
            if model_class in get_values(__lowerCAmelCase ) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            _UpperCAmelCase = model_class(__lowerCAmelCase )
            model.gradient_checkpointing_enable()
            model.to(__lowerCAmelCase )
            model.train()
            _UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
            _UpperCAmelCase = model(**__lowerCAmelCase ).loss
            loss.backward()

    # Exercise every sequence-classification problem type without broadcast warnings.
    def lowerCAmelCase_ ( self : str ):
        _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        _UpperCAmelCase = [
            {"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
            {"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
            {"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(__lowerCAmelCase ),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=f'''Testing {model_class} with {problem_type["title"]}''' ):
                    _UpperCAmelCase = problem_type["""title"""]
                    _UpperCAmelCase = problem_type["""num_labels"""]
                    _UpperCAmelCase = model_class(__lowerCAmelCase )
                    model.to(__lowerCAmelCase )
                    model.train()
                    _UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
                    if problem_type["num_labels"] > 1:
                        _UpperCAmelCase = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
                    _UpperCAmelCase = inputs["""labels"""].to(problem_type["""dtype"""] )
                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=__lowerCAmelCase ) as warning_list:
                        _UpperCAmelCase = model(**__lowerCAmelCase ).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message ):
                            raise ValueError(
                                f'''Something is going wrong in the regression problem: intercepted {w.message}''' )
                    loss.backward()

    # Slow: load the first public checkpoint end-to-end.
    @slow
    def lowerCAmelCase_ ( self : List[str] ):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _UpperCAmelCase = LevitModel.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
def __UpperCAmelCase ( ):
    """Load and return the standard COCO test-fixture image.

    NOTE(review): the original assigned the opened image to a throwaway name
    and then returned the undefined local `image`; binding the result fixes
    the NameError without changing the function's interface.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
    """Slow integration test: run the pretrained Levit teacher model on a real
    image and compare the first logits against recorded reference values.

    NOTE(review): identifiers are machine-mangled — both methods are named
    `lowerCAmelCase_` (the second shadows the first) and locals are bound to
    `_UpperCAmelCase` but read under other names (`model`, `image_processor`,
    `outputs`, `inputs`), so the test cannot run as written.
    """

    # Image processor built from the first published checkpoint.
    @cached_property
    def lowerCAmelCase_ ( self : int ):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )

    # Full forward pass on the COCO fixture image; checks logits shape and values.
    @slow
    def lowerCAmelCase_ ( self : int ):
        _UpperCAmelCase = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            __lowerCAmelCase )
        _UpperCAmelCase = self.default_image_processor
        _UpperCAmelCase = prepare_img()
        _UpperCAmelCase = image_processor(images=__lowerCAmelCase , return_tensors="""pt""" ).to(__lowerCAmelCase )
        # forward pass
        with torch.no_grad():
            _UpperCAmelCase = model(**__lowerCAmelCase )
        # verify the logits
        _UpperCAmelCase = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
        # Reference values recorded from a known-good run of this checkpoint.
        _UpperCAmelCase = torch.tensor([1.0_448, -0.3_745, -1.8_317] ).to(__lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1e-4 ) )
| 289 | """simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 289 | 1 |
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowerCAmelCase__ :Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(UpperCAmelCase )
class __a ( UpperCAmelCase ):
    """Zero-shot audio classification pipeline (CLAP-style audio/text matching).

    NOTE(review): identifiers are machine-mangled — `__call__` and the
    preprocess method declare duplicate parameter names (a SyntaxError), the
    four pipeline-stage methods all share the name `UpperCAmelCase__` (later
    defs shadow earlier ones), and method bodies reference the undefined name
    `__A` where their own parameters were meant.
    """

    def __init__( self , **_SCREAMING_SNAKE_CASE ) -> str:
        """Forward kwargs to the Pipeline base and require the PyTorch backend."""
        super().__init__(**__A )
        # Audio-text matching is only implemented for PyTorch here.
        if self.framework != "pt":
            raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
        # No specific FOR_XXX available yet

    def __call__( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
        """Classify audio(s) against candidate labels; delegates to Pipeline.__call__."""
        return super().__call__(__A , **__A )

    def UpperCAmelCase__ ( self , **_SCREAMING_SNAKE_CASE ) -> str:
        """Split kwargs into (preprocess, forward, postprocess) parameter dicts."""
        _UpperCAmelCase = {}
        if "candidate_labels" in kwargs:
            _UpperCAmelCase = kwargs['candidate_labels']
        if "hypothesis_template" in kwargs:
            _UpperCAmelCase = kwargs['hypothesis_template']
        return preprocess_params, {}, {}

    def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="This is a sound of {}." ) -> List[Any]:
        """Fetch/decode the audio, extract features, and tokenize one prompt per label."""
        if isinstance(__A , __A ):
            if audio.startswith('http://' ) or audio.startswith('https://' ):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                _UpperCAmelCase = requests.get(__A ).content
            else:
                with open(__A , 'rb' ) as f:
                    _UpperCAmelCase = f.read()
        if isinstance(__A , __A ):
            # Raw bytes are decoded to a waveform via ffmpeg at the extractor's rate.
            _UpperCAmelCase = ffmpeg_read(__A , self.feature_extractor.sampling_rate )
        if not isinstance(__A , np.ndarray ):
            raise ValueError('We expect a numpy ndarray as input' )
        if len(audio.shape ) != 1:
            raise ValueError('We expect a single channel audio input for ZeroShotAudioClassificationPipeline' )
        _UpperCAmelCase = self.feature_extractor(
            [audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors='pt' )
        _UpperCAmelCase = candidate_labels
        # One hypothesis string per candidate label, e.g. "This is a sound of dog.".
        _UpperCAmelCase = [hypothesis_template.format(__A ) for x in candidate_labels]
        _UpperCAmelCase = self.tokenizer(__A , return_tensors=self.framework , padding=__A )
        _UpperCAmelCase = [text_inputs]
        return inputs

    def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> List[str]:
        """Run the model on audio features + tokenized labels; keep per-audio logits."""
        _UpperCAmelCase = model_inputs.pop('candidate_labels' )
        _UpperCAmelCase = model_inputs.pop('text_inputs' )
        if isinstance(text_inputs[0] , __A ):
            _UpperCAmelCase = text_inputs[0]
        else:
            # Batching case.
            _UpperCAmelCase = text_inputs[0][0]
        _UpperCAmelCase = self.model(**__A , **__A )
        _UpperCAmelCase = {
            'candidate_labels': candidate_labels,
            'logits': outputs.logits_per_audio,
        }
        return model_outputs

    def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> Any:
        """Softmax the logits and return [{'score', 'label'}, ...] sorted best-first."""
        _UpperCAmelCase = model_outputs.pop('candidate_labels' )
        _UpperCAmelCase = model_outputs['logits'][0]
        if self.framework == "pt":
            _UpperCAmelCase = logits.softmax(dim=0 )
            _UpperCAmelCase = probs.tolist()
        else:
            raise ValueError('`tf` framework not supported.' )
        # NOTE(review): the sort key's lambda parameter is never used while the
        # body reads `x` — presumably `lambda x: -x[0]` before name mangling.
        _UpperCAmelCase = [
            {'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(__A , __A ) , key=lambda _SCREAMING_SNAKE_CASE : -x[0] )
        ]
        return result
| 359 |
from importlib import import_module
from .logging import get_logger
lowerCAmelCase__ :Optional[Any] = get_logger(__name__)
class __a :
    """Stand-in for a patched module: copies selected attributes of the real
    module onto itself and remembers the real module as `_original_module`.

    NOTE(review): `__init__` declares two parameters with the same name (a
    SyntaxError) and its body reads `module`/`attrs` that the mangled
    signature no longer binds; elsewhere in this chunk this class is referred
    to as `_PatchedModuleObj`, which does not match its (mangled) name here.
    """

    def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> List[Any]:
        """Copy public attributes (plus any dunder listed in `attrs`) from `module`."""
        _UpperCAmelCase = attrs or []
        if module is not None:
            for key in module.__dict__:
                # Public attributes are always mirrored; dunders only when whitelisted.
                if key in attrs or not key.startswith('__' ):
                    setattr(self , _SCREAMING_SNAKE_CASE , getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
        # Unwrap nested patched objects so _original_module is always the real module.
        _UpperCAmelCase = module._original_module if isinstance(_SCREAMING_SNAKE_CASE , _PatchedModuleObj ) else module
class __a :
    """Context manager / fixture that patches `obj.target` (e.g. "os.path.join")
    with a replacement object, handling both the attribute-of-submodule case
    ("os", "os.path" must also be wrapped) and the direct/builtin case ("open").

    NOTE(review): identifiers are machine-mangled — `__init__` declares four
    parameters under one name (a SyntaxError), locals/attributes are bound to
    throwaway names but read as `self.original`, `submodules`, `target_attr`,
    `attr_value`, etc., and `_PatchedModuleObj` refers to the previous class
    under a name this chunk never defines.
    """

    # Registry of patches activated via start() (shared, class-level state).
    _a : Any = []

    def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]:
        """Record the patch target object, dotted target path, replacement, and attrs."""
        _UpperCAmelCase = obj
        _UpperCAmelCase = target
        _UpperCAmelCase = new
        # First component of the dotted path, e.g. "os" for "os.path.join".
        _UpperCAmelCase = target.split('.' )[0]
        # Map of attribute name -> original value, used by __exit__ to restore.
        _UpperCAmelCase = {}
        _UpperCAmelCase = attrs or []

    def __enter__( self ) -> int:
        """Apply the patch, wrapping intermediate submodules as needed."""
        *_UpperCAmelCase , _UpperCAmelCase = self.target.split('.' )
        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(_SCREAMING_SNAKE_CASE ) ):
            try:
                _UpperCAmelCase = import_module('.'.join(submodules[: i + 1] ) )
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                _UpperCAmelCase = getattr(self.obj , _SCREAMING_SNAKE_CASE )
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    (isinstance(_SCREAMING_SNAKE_CASE , _PatchedModuleObj ) and obj_attr._original_module is submodule)
                ):
                    _UpperCAmelCase = obj_attr
                    # patch at top level
                    setattr(self.obj , _SCREAMING_SNAKE_CASE , _PatchedModuleObj(_SCREAMING_SNAKE_CASE , attrs=self.attrs ) )
                    _UpperCAmelCase = getattr(self.obj , _SCREAMING_SNAKE_CASE )
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _PatchedModuleObj(getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , attrs=self.attrs ) )
                        _UpperCAmelCase = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                    # finally set the target attribute
                    setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.new )
        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules: # if it's an attribute of a submodule like "os.path.join"
            try:
                _UpperCAmelCase = getattr(import_module('.'.join(_SCREAMING_SNAKE_CASE ) ) , _SCREAMING_SNAKE_CASE )
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj , _SCREAMING_SNAKE_CASE ) is attr_value:
                    _UpperCAmelCase = getattr(self.obj , _SCREAMING_SNAKE_CASE )
                    setattr(self.obj , _SCREAMING_SNAKE_CASE , self.new )
        elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open"
            _UpperCAmelCase = globals()['__builtins__'][target_attr]
            setattr(self.obj , _SCREAMING_SNAKE_CASE , self.new )
        else:
            raise RuntimeError(f'''Tried to patch attribute {target_attr} instead of a submodule.''' )

    def __exit__( self , *_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
        """Restore every attribute recorded in self.original during __enter__."""
        for attr in list(self.original ):
            setattr(self.obj , _SCREAMING_SNAKE_CASE , self.original.pop(_SCREAMING_SNAKE_CASE ) )

    def UpperCAmelCase__ ( self ) -> Optional[int]:
        """Activate the patch outside a with-block and register it for stop()."""
        self.__enter__()
        self._active_patches.append(self )

    def UpperCAmelCase__ ( self ) -> str:
        """Deactivate a patch previously started; no-op if it was never started."""
        try:
            self._active_patches.remove(self )
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
| 185 | 0 |
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class snake_case__:
    """Histogram-equalization ("constant stretch") of a grayscale image via cv2.

    NOTE(review): identifiers are machine-mangled — all three public methods
    share the name `lowercase_` (later defs shadow earlier ones), and every
    attribute assignment binds a local `lowerCAmelCase_` instead of the
    `self.*` attribute the rest of the code reads (`self.img`, `self.k`,
    `self.sk`, `self.rem`, ...), so the class cannot run as written.
    """

    def __init__( self ) -> Any:
        # Intended attributes (judging by later reads): img, original_image,
        # last_list (equalization lookup table), rem, L (=256 gray levels),
        # sk (cumulative probability), k (pixel count), number_of_rows/cols.
        lowerCAmelCase_ : List[str] = ''''''
        lowerCAmelCase_ : Any = ''''''
        lowerCAmelCase_ : List[str] = []
        lowerCAmelCase_ : List[Any] = 0
        lowerCAmelCase_ : Tuple = 2_5_6
        lowerCAmelCase_ : Any = 0
        lowerCAmelCase_ : Tuple = 0
        lowerCAmelCase_ : Optional[Any] = 0
        lowerCAmelCase_ : str = 0

    def lowercase_ ( self , __lowercase ) -> List[str]:
        """Equalize the image at the given path and write output_data/output.jpg."""
        lowerCAmelCase_ : Union[str, Any] = cva.imread(__lowercase , 0 )
        lowerCAmelCase_ : List[Any] = copy.deepcopy(self.img )
        # Histogram of pixel intensities over the 256 gray levels.
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] , label='''x''' )
        lowerCAmelCase_ : Optional[int] = np.sum(__lowercase )
        for i in range(len(__lowercase ) ):
            # Accumulate the CDF, scale to [0, L-1], round to the nearest level,
            # and append to the lookup table used for the final remap below.
            lowerCAmelCase_ : Tuple = x[i] / self.k
            self.sk += prk
            lowerCAmelCase_ : str = (self.L - 1) * self.sk
            if self.rem != 0:
                lowerCAmelCase_ : Optional[int] = int(last % last )
            lowerCAmelCase_ : Union[str, Any] = int(last + 1 if self.rem >= 0.5 else last )
            self.last_list.append(__lowercase )
            lowerCAmelCase_ : int = int(np.ma.count(self.img ) / self.img[1].size )
            lowerCAmelCase_ : Optional[int] = self.img[1].size
        # Remap every pixel through the equalization lookup table.
        for i in range(self.number_of_cols ):
            for j in range(self.number_of_rows ):
                lowerCAmelCase_ : List[str] = self.img[j][i]
                if num != self.last_list[num]:
                    lowerCAmelCase_ : Tuple = self.last_list[num]
        cva.imwrite('''output_data/output.jpg''' , self.img )

    def lowercase_ ( self ) -> str:
        """Plot the histogram of the (equalized) image."""
        plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] )

    def lowercase_ ( self ) -> List[Any]:
        """Display input and output images for five seconds, then close windows."""
        cva.imshow('''Output-Image''' , self.img )
        cva.imshow('''Input-Image''' , self.original_image )
        cva.waitKey(5_0_0_0 )
        cva.destroyAllWindows()
if __name__ == "__main__":
    # NOTE(review): mangled — the path and instance are bound to throwaway
    # names while the calls below read the undefined `file_path` / `stretcher`,
    # and `ConstantStretch` does not match the class name defined above.
    # Also suspicious: os.path.basename(__file__) — dirname was presumably
    # intended to locate image_data/input.jpg next to this script; confirm.
    _UpperCAmelCase : Tuple =os.path.join(os.path.basename(__file__), """image_data/input.jpg""")
    _UpperCAmelCase : str =ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
stretcher.show_image() | 262 |
def lowerCAmelCase ( lowerCAmelCase_ )-> set:
    """Return an approximate minimum vertex cover of a graph via maximal matching.

    `lowerCAmelCase_` is an adjacency mapping {node: [neighbours]}.  Repeatedly
    pick an arbitrary remaining edge, add both endpoints to the cover, and drop
    every edge incident to either endpoint (the classic 2-approximation).

    NOTE(review): the original rebound its parameter to an empty set, read the
    undefined locals `chosen_vertices`/`from_node`/`to_node`, called an
    undefined helper `get_edges`, and discarded the wrong object inside the
    loop; the edge set is now built inline and `edges.discard(edge)` removes
    each incident edge as the surrounding comments describe.
    """
    chosen_vertices = set()
    # edges = set of the graph's directed edges, built inline from the adjacency map
    edges = {(from_node, to_node) for from_node, to_nodes in lowerCAmelCase_.items() for to_node in to_nodes}
    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices
def lowerCAmelCase ( lowerCAmelCase_ )-> set:
    """Return the set of directed edges (from_node, to_node) of an adjacency map.

    NOTE(review): the original rebound its parameter to an empty set and then
    read the undefined names `graph` and `edges`; this version binds them
    properly.  This def also shadows the identically named function above it —
    the two were presumably distinct (`matching_min_vertex_cover` /
    `get_edges`, per the commented example at the bottom of this chunk) before
    the identifiers were mangled.
    """
    edges = set()
    for from_node, to_nodes in lowerCAmelCase_.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node) )
    return edges
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}") | 262 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# NOTE(review): all three constants below are bound to the same mangled name
# `lowerCAmelCase__`, so each assignment shadows the previous one; the class
# attributes that consume them (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES) reference names this chunk never binds.
lowerCAmelCase__ : Any = logging.get_logger(__name__)

# Name of the SentencePiece model file expected in a checkpoint directory.
lowerCAmelCase__ : Optional[Any] = {"vocab_file": "spm_char.model"}

# Hosted vocab-file URL per published SpeechT5 checkpoint.
lowerCAmelCase__ : Any = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

# Maximum positional-embedding size per checkpoint.
lowerCAmelCase__ : Tuple = {
    "microsoft/speecht5_asr": 10_24,
    "microsoft/speecht5_tts": 10_24,
    "microsoft/speecht5_vc": 10_24,
}
class SCREAMING_SNAKE_CASE__ ( snake_case__ ):
    """SpeechT5 character-level tokenizer backed by a SentencePiece model.

    NOTE(review): identifiers are machine-mangled — `__init__` declares several
    parameters under the single name `UpperCAmelCase_` (a SyntaxError), every
    helper method is named `lowerCamelCase_` (later defs shadow earlier ones),
    and bodies read names (`vocab_file`, `tokens`, `d`, ...) that the mangled
    signatures no longer bind.
    """

    SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    SCREAMING_SNAKE_CASE = ['''input_ids''', '''attention_mask''']

    def __init__( self : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any]="<s>" , UpperCAmelCase_ : Tuple="</s>" , UpperCAmelCase_ : Optional[Any]="<unk>" , UpperCAmelCase_ : List[str]="<pad>" , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , **UpperCAmelCase_ : List[str] , ):
        """Load the SentencePiece model from `vocab_file` and register special tokens."""
        __UpperCAmelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , )
        __UpperCAmelCase : Union[str, Any] = vocab_file
        __UpperCAmelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(UpperCAmelCase_ )

    @property
    def lowerCamelCase_ ( self : Tuple ):
        """Vocabulary size, as reported by the SentencePiece model."""
        return self.sp_model.get_piece_size()

    def lowerCamelCase_ ( self : Any ):
        """Return the full token -> id mapping, including added tokens."""
        __UpperCAmelCase : Optional[Any] = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self : int ):
        """Drop the unpicklable SentencePiece processor before pickling."""
        __UpperCAmelCase : str = self.__dict__.copy()
        __UpperCAmelCase : List[Any] = None
        return state

    def __setstate__( self : List[Any] , UpperCAmelCase_ : Optional[int] ):
        """Restore state and rebuild the SentencePiece processor from vocab_file."""
        __UpperCAmelCase : str = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            __UpperCAmelCase : str = {}
        __UpperCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def lowerCamelCase_ ( self : List[str] , UpperCAmelCase_ : str ):
        """Tokenize text into SentencePiece string pieces."""
        return self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ )

    def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase_ : Any ):
        """Convert a token (str) to an id via the SentencePiece vocab."""
        return self.sp_model.piece_to_id(UpperCAmelCase_ )

    def lowerCamelCase_ ( self : Dict , UpperCAmelCase_ : List[Any] ):
        """Convert an id to its token (str) via the SentencePiece vocab."""
        __UpperCAmelCase : Optional[Any] = self.sp_model.IdToPiece(UpperCAmelCase_ )
        return token

    def lowerCamelCase_ ( self : Tuple , UpperCAmelCase_ : List[str] ):
        """Join a token sequence back into a string, passing special tokens through."""
        __UpperCAmelCase : List[str] = []
        __UpperCAmelCase : int = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(UpperCAmelCase_ ) + token
                __UpperCAmelCase : Union[str, Any] = []
            else:
                current_sub_tokens.append(UpperCAmelCase_ )
        out_string += self.sp_model.decode(UpperCAmelCase_ )
        return out_string.strip()

    def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any]=None ):
        """Append EOS to a (single or pair) sequence of ids."""
        if token_ids_a is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_a + [self.eos_token_id]

    def lowerCamelCase_ ( self : Any , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False ):
        """Return a mask marking special tokens (1) vs. sequence tokens (0)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ )
        # Only the trailing EOS is special in the non-pretokenized case.
        __UpperCAmelCase : int = [1]
        if token_ids_a is None:
            return ([0] * len(UpperCAmelCase_ )) + suffix_ones
        return ([0] * len(UpperCAmelCase_ )) + ([0] * len(UpperCAmelCase_ )) + suffix_ones

    def lowerCamelCase_ ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ):
        """Copy (or serialize) the SentencePiece model file into `save_directory`."""
        if not os.path.isdir(UpperCAmelCase_ ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        __UpperCAmelCase : Optional[int] = os.path.join(
            UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , UpperCAmelCase_ )
        elif not os.path.isfile(self.vocab_file ):
            # Original file is missing on disk — write the serialized model instead.
            with open(UpperCAmelCase_ , "wb" ) as fi:
                __UpperCAmelCase : Dict = self.sp_model.serialized_model_proto()
                fi.write(UpperCAmelCase_ )
        return (out_vocab_file,)
| 369 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCAmelCase__ : Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(snake_case__ )
class SCREAMING_SNAKE_CASE__ ( snake_case__ ):
    """Zero-shot image classification pipeline (CLIP-style image/text matching).

    NOTE(review): identifiers are machine-mangled — the four pipeline-stage
    methods all share the name `lowerCamelCase_` (later defs shadow earlier
    ones), locals are bound to `__UpperCAmelCase` but read under other names,
    and the sort lambda at the bottom declares a parameter it never uses.
    """

    def __init__( self : List[Any] , **UpperCAmelCase_ : Dict ):
        """Require the vision backend and validate the model against the zero-shot mapping."""
        super().__init__(**UpperCAmelCase_ )
        requires_backends(self , "vision" )
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )

    def __call__( self : List[str] , UpperCAmelCase_ : Union[str, List[str], "Image", List["Image"]] , **UpperCAmelCase_ : Tuple ):
        """Classify image(s) against candidate labels; delegates to Pipeline.__call__."""
        return super().__call__(UpperCAmelCase_ , **UpperCAmelCase_ )

    def lowerCamelCase_ ( self : Union[str, Any] , **UpperCAmelCase_ : Optional[Any] ):
        """Split kwargs into (preprocess, forward, postprocess) parameter dicts."""
        __UpperCAmelCase : List[Any] = {}
        if "candidate_labels" in kwargs:
            __UpperCAmelCase : Union[str, Any] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            __UpperCAmelCase : int = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Optional[Any]="This is a photo of {}." ):
        """Load the image, extract pixel features, and tokenize one prompt per label."""
        __UpperCAmelCase : Tuple = load_image(UpperCAmelCase_ )
        __UpperCAmelCase : List[Any] = self.image_processor(images=[image] , return_tensors=self.framework )
        __UpperCAmelCase : Dict = candidate_labels
        # One hypothesis string per candidate label, e.g. "This is a photo of cat.".
        __UpperCAmelCase : Any = [hypothesis_template.format(UpperCAmelCase_ ) for x in candidate_labels]
        __UpperCAmelCase : Optional[int] = self.tokenizer(UpperCAmelCase_ , return_tensors=self.framework , padding=UpperCAmelCase_ )
        __UpperCAmelCase : List[Any] = [text_inputs]
        return inputs

    def lowerCamelCase_ ( self : Tuple , UpperCAmelCase_ : Tuple ):
        """Run the model on image features + tokenized labels; keep per-image logits."""
        __UpperCAmelCase : Union[str, Any] = model_inputs.pop("candidate_labels" )
        __UpperCAmelCase : str = model_inputs.pop("text_inputs" )
        if isinstance(text_inputs[0] , UpperCAmelCase_ ):
            __UpperCAmelCase : Tuple = text_inputs[0]
        else:
            # Batching case.
            __UpperCAmelCase : Optional[int] = text_inputs[0][0]
        __UpperCAmelCase : Any = self.model(**UpperCAmelCase_ , **UpperCAmelCase_ )
        __UpperCAmelCase : Dict = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase_ : Dict ):
        """Softmax the logits and return [{'score', 'label'}, ...] sorted best-first."""
        __UpperCAmelCase : Any = model_outputs.pop("candidate_labels" )
        __UpperCAmelCase : Tuple = model_outputs["logits"][0]
        if self.framework == "pt":
            __UpperCAmelCase : Union[str, Any] = logits.softmax(dim=-1 ).squeeze(-1 )
            __UpperCAmelCase : Dict = probs.tolist()
            if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
                __UpperCAmelCase : List[Any] = [scores]
        elif self.framework == "tf":
            __UpperCAmelCase : Union[str, Any] = stable_softmax(UpperCAmelCase_ , axis=-1 )
            __UpperCAmelCase : List[str] = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}" )
        # NOTE(review): the sort key's lambda parameter is never used while the
        # body reads `x` — presumably `lambda x: -x[0]` before name mangling.
        __UpperCAmelCase : Dict = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(UpperCAmelCase_ , UpperCAmelCase_ ) , key=lambda UpperCAmelCase_ : -x[0] )
        ]
        return result
| 37 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
lowerCAmelCase__ = logging.get_logger(__name__)
# Map of pretrained checkpoint names to hosted config URLs.
# NOTE(review): this re-binds `lowerCAmelCase__` and therefore clobbers the
# logger above -- these were presumably two distinct names (logger / archive
# map) before identifier renaming; confirm against upstream.
lowerCAmelCase__ = {
    '''google/pix2struct-textcaps-base''': (
        '''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
    ),
}
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
    """Text (decoder) configuration for Pix2Struct models.

    NOTE(review): restored from a mangled body whose __init__ had duplicate
    parameter names (a SyntaxError) and assigned every value to a throwaway
    local instead of `self`; the base class `lowercase` was undefined, so it is
    set to the `PretrainedConfig` imported at the top of this module.
    Upstream names this class Pix2StructTextConfig -- confirm before relying on
    the module-level name.
    """

    # Consumed by the PretrainedConfig machinery.
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50_244,
        hidden_size=768,
        d_kv=64,
        d_ff=2_048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        """Store hyperparameters and forward token/decoder flags to the base class."""
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this sub-config, unwrapping it from a composite pix2struct config if needed."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors."""
            )
        return cls.from_dict(config_dict, **kwargs)
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
    """Vision (encoder) configuration for Pix2Struct models.

    NOTE(review): restored from a mangled body (duplicate parameter names,
    assignments to a throwaway local instead of `self`, undefined base class).
    Upstream names this class Pix2StructVisionConfig.
    """

    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2_048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4_096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        """Store vision-encoder hyperparameters."""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this sub-config, unwrapping it from a composite pix2struct config if needed."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors."""
            )
        return cls.from_dict(config_dict, **kwargs)
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
    """Composite Pix2Struct configuration bundling a text and a vision sub-config.

    NOTE(review): restored from a mangled body (duplicate parameter names,
    assignments to a throwaway local instead of `self`). Two assignments that
    had collapsed to `local = self.initializer_range` are restored as the
    propagation of `initializer_range` into the sub-configs.
    """

    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        """Build sub-configs (defaults if None) and mirror shared token ids from the text config."""
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")
        # NOTE(review): these names must match the actual text/vision config
        # classes defined above in this module -- confirm the bindings exist.
        self.text_config = PixaStructTextConfig(**text_config)
        self.vision_config = PixaStructVisionConfig(**vision_config)
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        # Keep sub-config init ranges in sync with the composite config.
        self.vision_config.initializer_range = self.initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Alternate constructor from already-built text and vision config objects."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested sub-configs into plain dicts."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 108 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
lowerCAmelCase__ = logging.get_logger(__name__)
# Map of pretrained DPR checkpoint names to hosted config URLs.
# NOTE(review): this re-binds `lowerCAmelCase__` and clobbers the logger above --
# presumably two distinct names before identifier renaming; confirm upstream.
lowerCAmelCase__ = {
    '''facebook/dpr-ctx_encoder-single-nq-base''': (
        '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-question_encoder-single-nq-base''': (
        '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-reader-single-nq-base''': (
        '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-ctx_encoder-multiset-base''': (
        '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-question_encoder-multiset-base''': (
        '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
    ),
    '''facebook/dpr-reader-multiset-base''': (
        '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
    ),
}
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
    """Configuration for DPR encoders/readers (BERT-style backbone plus a projection head).

    NOTE(review): restored from a mangled body whose __init__ had duplicate
    parameter names (a SyntaxError) and assigned every value to a throwaway
    local instead of `self`; the undefined base `lowercase` is replaced by the
    `PretrainedConfig` imported at the top of this module.
    """

    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        """Store BERT-style hyperparameters; projection_dim=0 means no projection layer."""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
| 108 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger.
__UpperCAmelCase = logging.get_logger(__name__)
# Map of pretrained XLM checkpoint names to hosted config URLs.
# NOTE(review): re-binding `__UpperCAmelCase` clobbers the logger above --
# presumably two distinct names before identifier renaming; confirm upstream.
__UpperCAmelCase = {
    'xlm-mlm-en-2048': 'https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json',
    'xlm-mlm-ende-1024': 'https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json',
    'xlm-mlm-enfr-1024': 'https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json',
    'xlm-mlm-enro-1024': 'https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json',
    'xlm-mlm-tlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json',
    'xlm-mlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json',
    'xlm-clm-enfr-1024': 'https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json',
    'xlm-clm-ende-1024': 'https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json',
    'xlm-mlm-17-1280': 'https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json',
    'xlm-mlm-100-1280': 'https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json',
}
class _SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    """Configuration for XLM models.

    NOTE(review): restored from a mangled body (duplicate `__A` parameter names,
    assignments to a throwaway local instead of `self`); the undefined base
    `A__` is replaced by the `PretrainedConfig` imported at the top of this
    module.
    """

    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=3_0145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1E-12,
        init_std=0.0_2,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ) -> Union[str, Any]:
        """Store XLM hyperparameters and forward token ids to the base class."""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id
        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
class _SCREAMING_SNAKE_CASE ( OnnxConfig ):
    """ONNX export configuration for XLM.

    NOTE(review): restored from a mangled body where the axis mapping was
    assigned to a throwaway local while the return statement read the unbound
    name `dynamic_axis`; the undefined base `A__` is replaced by the
    `OnnxConfig` imported at the top of this module, and the property is named
    `inputs` per the OnnxConfig contract.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis specification for the exported graph's input tensors."""
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 1 |
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
# Make the local `test_module` fixture package importable for the dynamic-code tests.
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))

from test_module.custom_configuration import CustomConfig  # noqa E402

# Path to a minimal local config fixture used by the local-file tests.
# NOTE(review): assumed to be a RoBERTa-style dummy config -- confirm the fixture contents.
__UpperCAmelCase = get_tests_dir('fixtures/dummy-config.json')
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Tests for the AutoConfig factory.

    NOTE(review): restored from a mangled class in which no method carried a
    `test_` prefix (so unittest discovered nothing) and most call arguments
    were the unbound name `__A`. Method bodies below are reconstructed from the
    canonical upstream test suite -- review against the intended transformers
    version before merging.
    """

    def setUp(self):
        # Never wait on the interactive remote-code prompt during tests.
        # NOTE(review): the mangled original assigned 0 to a throwaway local.
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        # The dummy fixture is a RoBERTa-style config.
        config = AutoConfig.from_pretrained(get_tests_dir("fixtures/dummy-config.json"))
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))
            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)
            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            _ = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            _ = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            _ = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")
        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")

    def test_new_dynamic_config_registration(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
| 1 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger.
snake_case : str = logging.get_logger(__name__)
# Map of pretrained MobileNetV1 checkpoint names to hosted config URLs.
# NOTE(review): re-binding `snake_case` clobbers the logger above -- presumably
# two distinct names before identifier renaming; confirm upstream.
snake_case : Optional[int] = {
    '''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
    '''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class _snake_case ( PretrainedConfig ):
    """Configuration for MobileNetV1 models.

    NOTE(review): restored from a mangled class that inherited from itself
    (`_snake_case(_snake_case)`, a NameError), had duplicate parameter names
    (a SyntaxError), and assigned every value to a throwaway local instead of
    `self`; the base is the `PretrainedConfig` imported at the top of this
    module.
    """

    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        """Store MobileNetV1 hyperparameters.

        Raises:
            ValueError: if depth_multiplier is not strictly positive.
        """
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class _snake_case ( OnnxConfig ):
    """ONNX export configuration for MobileNetV1.

    NOTE(review): restored from a mangled class whose base was itself and whose
    class attribute and all three properties shared one name, shadowing each
    other; members are renamed to the OnnxConfig contract names.
    """

    # Minimum torch version that supports exporting this model.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        """Dynamic-axis specification for the exported graph's input tensors."""
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self):
        """Output tensors depend on whether a classification head is exported."""
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance when validating the exported model against torch."""
        return 1e-4
| 94 |
from math import pi, sqrt
def gamma(num: float) -> float:
    """Compute the Gamma function for positive integers and half-integers.

    Uses gamma(n) = (n-1)! for integers and the recurrence
    gamma(x) = (x-1) * gamma(x-1) anchored at gamma(0.5) = sqrt(pi).

    Restored from a mangled version whose parameter name did not match the body
    and which returned sqrt(num) instead of sqrt(pi) in the base case.

    Raises:
        ValueError: if num <= 0 (outside the real Gamma domain handled here).
        OverflowError: if num > 171.5 (result exceeds float range).
        NotImplementedError: if num is not an integer or half-integer.
    """
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        # Base case of the half-integer recurrence: gamma(1/2) = sqrt(pi).
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)
def test_gamma() -> None:
    """Self-check for gamma(): exact values at small integers and the half-integer base case.

    Restored from a mangled version that compared against sqrt of an unbound
    name instead of sqrt(pi), and whose name collided with gamma's def.
    """
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Interactive driver: keep prompting until the user enters 0.
    # Restored from a mangled version that assigned the input to one name but
    # looped/printed on the unbound name `num`.
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        if not num:
            # Exit sentinel -- don't call gamma(0), which raises ValueError.
            break
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
| 228 | 0 |
'''simple docstring'''
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    """Builds small MobileBert configs/inputs and checks each head's output shapes.

    Restored from a mangled class whose methods had duplicate parameter names
    (a SyntaxError) and assigned to throwaway locals instead of `self`; the
    class and method names below are the ones the test class later in this
    file calls (`MobileBertModelTester`, `prepare_config_and_inputs`,
    `create_and_check_mobilebert_*`).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        """Store the (deliberately tiny) model hyperparameters for fast tests."""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Build random ids/masks/labels plus a config for one forward pass."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Build a MobileBertConfig from the stored hyperparameters."""
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            # NOTE(review): the mangled original passed an unbound name here;
            # upstream uses is_decoder=False -- confirm.
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Base model: checks last_hidden_state and pooler_output shapes."""
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """MLM head: logits over the vocabulary at every position."""
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """NSP head: binary is-next logits per example."""
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Pretraining heads: MLM prediction logits plus NSP relationship logits."""
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """QA head: per-position start/end span logits."""
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Sequence-classification head: one logit vector per example."""
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Token-classification head: per-position label logits."""
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Multiple-choice head: inputs are tiled across the choice axis."""
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape ModelTesterMixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
    """Common-test harness for the MobileBERT model family.

    Mixes in the shared model/pipeline test suites (the two obfuscated base
    classes are presumably ModelTesterMixin / PipelineTesterMixin — TODO
    confirm) and wires them to the MobileBERT tester defined above.
    """
    # All MobileBERT heads exercised by the common model tests (empty when
    # torch is unavailable so collection does not fail).
    lowerCAmelCase_ : Optional[Any] = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task -> model-class mapping used by the pipeline tests.
    lowerCAmelCase_ : List[str] = (
        {
            """feature-extraction""": MobileBertModel,
            """fill-mask""": MobileBertForMaskedLM,
            """question-answering""": MobileBertForQuestionAnswering,
            """text-classification""": MobileBertForSequenceClassification,
            """token-classification""": MobileBertForTokenClassification,
            """zero-shot""": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # Boolean flag consumed by the mixin (presumably fx-compatibility or
    # resize-embeddings support — TODO confirm against the mixin).
    lowerCAmelCase_ : Tuple = True
    # NOTE(review): the three parameters below share one obfuscated name, which
    # is a SyntaxError as written, and `return_labels`/`model_class`/
    # `inputs_dict` are never bound under those names — confirm against the
    # un-obfuscated original before running.
    def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : List[str]=False ):
        """Extend the mixin's input preparation with zeroed label tensors for
        pretraining-style model classes."""
        UpperCAmelCase__ = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
        if return_labels:
            if model_class in get_values(_UpperCAmelCase ):
                # Token-level labels, one per position.
                UpperCAmelCase__ = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCAmelCase )
                # Sequence-level labels, one per example.
                UpperCAmelCase__ = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
        return inputs_dict
    def SCREAMING_SNAKE_CASE__ ( self : Any ):
        """Instantiate the model tester and the config tester used by all tests."""
        UpperCAmelCase__ = MobileBertModelTester(self )
        UpperCAmelCase__ = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
    def SCREAMING_SNAKE_CASE__ ( self : int ):
        """Run the shared configuration sanity checks."""
        self.config_tester.run_common_tests()
    def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
        """Smoke-test the base MobileBertModel forward pass."""
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*_UpperCAmelCase )
    def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
        """Smoke-test the masked-LM head."""
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*_UpperCAmelCase )
    def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
        """Smoke-test the multiple-choice head."""
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_UpperCAmelCase )
    def SCREAMING_SNAKE_CASE__ ( self : int ):
        """Smoke-test the next-sentence-prediction head."""
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_UpperCAmelCase )
    def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
        """Smoke-test the combined pretraining head."""
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*_UpperCAmelCase )
    def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
        """Smoke-test the question-answering head."""
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*_UpperCAmelCase )
    def SCREAMING_SNAKE_CASE__ ( self : Dict ):
        """Smoke-test the sequence-classification head."""
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_UpperCAmelCase )
    def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
        """Smoke-test the token-classification head."""
        UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*_UpperCAmelCase )
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
    """Build a ``torch.long`` tensor of token ids on the shared test device.

    Fix: the original passed the token list itself as ``device=`` (the same
    name as the parameter), which fails at runtime; tensors belong on the
    module-level ``torch_device`` imported from ``transformers.testing_utils``.
    """
    return torch.tensor(
        SCREAMING_SNAKE_CASE__ , dtype=torch.long , device=torch_device , )
# Relative tolerance for the integration check below: outputs are compared via
# their ratio to the expected slice, which must stay within 1 +/- this value.
UpperCAmelCase_ = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
    """Slow integration test that checks real MobileBERT outputs against a
    recorded activation slice from the released checkpoint."""
    # NOTE(review): `_UpperCAmelCase`, `model`, `output`, `expected_slice`,
    # `lower_bound`, `upper_bound` and `TOLERANCE` are obfuscation residue and
    # are unbound as written (the device is presumably `torch_device` and
    # locals should keep their assigned names) — confirm against the original.
    @slow
    def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
        """Run the released checkpoint on a fixed sentence and compare a 3x3
        slice of the hidden states to hard-coded reference values."""
        UpperCAmelCase__ = MobileBertModel.from_pretrained("""google/mobilebert-uncased""" ).to(_UpperCAmelCase )
        UpperCAmelCase__ = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
        with torch.no_grad():
            UpperCAmelCase__ = model(_UpperCAmelCase )[0]
        # (batch, sequence length, hidden size) for the fixed input above.
        UpperCAmelCase__ = torch.Size((1, 9, 5_12) )
        self.assertEqual(output.shape , _UpperCAmelCase )
        UpperCAmelCase__ = torch.tensor(
            [
                [
                    [-2.4736526E07, 8.2691656E04, 1.6521838E05],
                    [-5.7541704E-01, 3.9056022E00, 4.4011507E00],
                    [2.6047359E00, 1.5677652E00, -1.7324188E-01],
                ]
            ] , device=_UpperCAmelCase , )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        UpperCAmelCase__ = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
        UpperCAmelCase__ = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
        self.assertTrue(lower_bound and upper_bound )
| 61 |
'''simple docstring'''
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
# Module-level logger namespaced to this file.
UpperCAmelCase_ = logging.get_logger(__name__)
def _UpperCamelCase ( ):
'''simple docstring'''
UpperCAmelCase__ = os.getenv("""SM_HP_MP_PARAMETERS""" , """{}""" )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
UpperCAmelCase__ = json.loads(SCREAMING_SNAKE_CASE__ )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
UpperCAmelCase__ = os.getenv("""SM_FRAMEWORK_PARAMS""" , """{}""" )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
UpperCAmelCase__ = json.loads(SCREAMING_SNAKE_CASE__ )
if not mpi_options.get("""sagemaker_mpi_enabled""" , SCREAMING_SNAKE_CASE__ ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("""smdistributed""" ) is not None
# When SageMaker model parallelism is active, import and initialise the
# smdistributed runtime once at module import time.
# NOTE(review): `is_sagemaker_model_parallel_available` is presumably the
# availability check defined above (obfuscated to `_UpperCamelCase` here) —
# confirm the binding exists under this name.
if is_sagemaker_model_parallel_available():
    import smdistributed.modelparallel.torch as smp
    smp.init()
@dataclass
class lowerCAmelCase_ ( lowerCamelCase_ ):
    """Deprecated TrainingArguments variant with SageMaker-specific device setup
    (model parallel via smdistributed, data parallel via smddp).

    NOTE(review): many locals below are obfuscation residue — `device`,
    `_UpperCAmelCase` and the repeated `UpperCAmelCase__` bindings are unbound
    or shadowed as written; confirm against the un-obfuscated original.
    """
    # Raw mp-parameters string injected by the SageMaker launcher.
    lowerCAmelCase_ : str = field(
        default="""""" , metadata={"""help""": """Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"""} , )
    def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
        """Post-init hook: defer to the parent, then emit a deprecation warning."""
        super().__post_init__()
        # NOTE(review): the second warn() argument should be a warning category
        # (presumably FutureWarning); `_UpperCAmelCase` is unbound here.
        warnings.warn(
            """`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """
            """`TrainingArguments` instead.""" , _UpperCAmelCase , )
    @cached_property
    def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
        """Select the torch device (and process-group backend) for this run.

        Priority: explicit CPU -> SageMaker model parallel -> SageMaker data
        parallel (smddp) -> single-process (possibly multi-GPU DataParallel)
        -> regular torch.distributed with NCCL.
        """
        logger.info("""PyTorch: setting up devices""" )
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                """torch.distributed process group is initialized, but local_rank == -1. """
                """In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" )
        if self.no_cuda:
            # Forced CPU run: no GPUs regardless of availability.
            UpperCAmelCase__ = torch.device("""cpu""" )
            UpperCAmelCase__ = 0
        elif is_sagemaker_model_parallel_available():
            # Model parallel: one GPU per smp local rank.
            UpperCAmelCase__ = smp.local_rank()
            UpperCAmelCase__ = torch.device("""cuda""" , _UpperCAmelCase )
            UpperCAmelCase__ = 1
        elif is_sagemaker_dp_enabled():
            # Data parallel via SageMaker's smddp backend.
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401
            torch.distributed.init_process_group(backend="""smddp""" , timeout=self.ddp_timeout_delta )
            UpperCAmelCase__ = int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK""" ) )
            UpperCAmelCase__ = torch.device("""cuda""" , self.local_rank )
            UpperCAmelCase__ = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            UpperCAmelCase__ = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            UpperCAmelCase__ = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="""nccl""" , timeout=self.ddp_timeout_delta )
            UpperCAmelCase__ = torch.device("""cuda""" , self.local_rank )
            UpperCAmelCase__ = 1
        if device.type == "cuda":
            torch.cuda.set_device(_UpperCAmelCase )
        return device
    @property
    def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
        """World size: smp's data-parallel size under model parallelism,
        otherwise the parent's notion of world size."""
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size
    @property
    def SCREAMING_SNAKE_CASE__ ( self : Dict ):
        """Whether the Trainer should move the model to the device itself
        (smp manages placement under model parallelism)."""
        return not is_sagemaker_model_parallel_available()
    @property
    def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
        """Always False: gradient-accumulation no_sync is not used here."""
        return False
| 61 | 1 |
from __future__ import annotations
def A_ ( _lowerCAmelCase ) -> list[int]:  # This function is recursive
    """Return the longest non-decreasing subsequence of the input list.

    Classic recursive formulation: the answer either contains the first
    element (the pivot) or starts at the first element smaller than it.

    Fixes the obfuscated body, which compared a list's length against itself
    (always False) and recursed on the full input (infinite recursion) instead
    of the filtered temporaries.
    """
    array_length = len(_lowerCAmelCase )
    # A list of zero or one elements is its own longest subsequence.
    if array_length <= 1:
        return _lowerCAmelCase
    pivot = _lowerCAmelCase[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if _lowerCAmelCase[i] < pivot:
            # Candidate that *excludes* the pivot: recurse on the tail starting
            # at the first smaller element, keeping only elements >= it.
            is_found = True
            temp_array = [element for element in _lowerCAmelCase[i:] if element >= _lowerCAmelCase[i]]
            temp_array = A_(temp_array )
            if len(temp_array ) > len(longest_subseq ):
                longest_subseq = temp_array
        else:
            i += 1
    # Candidate that *includes* the pivot: pivot plus the best subsequence of
    # the remaining elements that keep the sequence non-decreasing.
    temp_array = [element for element in _lowerCAmelCase[1:] if element >= pivot]
    temp_array = [pivot, *A_(temp_array )]
    if len(temp_array ) > len(longest_subseq ):
        return temp_array
    else:
        return longest_subseq
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 52 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
# Container bundling the three MNIST splits returned by the reader helpers.
# NOTE(review): both constants below are bound to the same obfuscated name
# `__A`, so the URL assignment shadows the namedtuple; originally these were
# distinct names (presumably `_Datasets` and `DEFAULT_SOURCE_URL`) — confirm.
__A : Tuple = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test'''])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
__A : Tuple = '''https://storage.googleapis.com/cvdf-datasets/mnist/'''
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """Read one big-endian unsigned 32-bit integer from the byte stream.

    Fixes the obfuscated body: ``numpy.uintaa`` does not exist (it was
    ``numpy.uint32``), and the stream parameter was being passed as ``dtype``
    while the read targeted an undefined name.
    """
    # Big-endian uint32, matching the IDX file-format headers.
    dt = numpy.dtype(numpy.uint32 ).newbyteorder('>' )
    return numpy.frombuffer(_UpperCAmelCase.read(4 ), dtype=dt )[0]
@deprecated(None, 'Please use tf.data to implement this functionality.' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    """Extract MNIST images from a gzipped IDX file object into a uint8 array of
    shape ``[num_images, rows, cols, 1]``.

    Fixes the obfuscated body: locals (``magic``, ``rows`` etc.) were never
    bound, ``f`` was undefined (it is the parameter), ``numpy.uinta`` does not
    exist (``numpy.uint8``), and the decorator's first argument was an
    undefined name (should be ``None``).

    Raises:
        ValueError: if the IDX magic number is not 2051 (image files).
    """
    print('Extracting', _UpperCAmelCase.name )
    with gzip.GzipFile(fileobj=_UpperCAmelCase ) as bytestream:
        # NOTE(review): `_readaa` is presumably the 32-bit header reader defined
        # above under an obfuscated name — confirm the module-level binding.
        magic = _readaa(bytestream )
        if magic != 2_051:
            raise ValueError(
                'Invalid magic number %d in MNIST image file: %s' % (magic, _UpperCAmelCase.name) )
        num_images = _readaa(bytestream )
        rows = _readaa(bytestream )
        cols = _readaa(bytestream )
        buf = bytestream.read(rows * cols * num_images )
        data = numpy.frombuffer(buf, dtype=numpy.uint8 )
        # Add a trailing depth-1 channel axis.
        data = data.reshape(num_images, rows, cols, 1 )
        return data
@deprecated(None, 'Please use tf.one_hot on tensors.' )
def SCREAMING_SNAKE_CASE__ ( labels_dense, num_classes ):
    """Convert class-index labels to a dense one-hot matrix.

    Fixes the obfuscated version: the two parameters shared one name (a
    SyntaxError), and the one-hot write was degraded to a throwaway local so
    an all-zeros matrix was returned.

    Args:
        labels_dense: 1-D array of class indices.
        num_classes: number of one-hot columns.

    Returns:
        Float array of shape ``[num_labels, num_classes]`` with a single 1 per row.
    """
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels ) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes) )
    # Flat index of the hot entry in each row: row * num_classes + class index.
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None, 'Please use tf.data to implement this functionality.' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, one_hot=False, num_classes=10 ):
    """Extract MNIST labels from a gzipped IDX file object into a 1-D uint8
    array (or a one-hot matrix when *one_hot* is set).

    Fixes the obfuscated version: all three parameters shared one name (a
    SyntaxError), locals were never bound, and ``numpy.uinta`` does not exist
    (``numpy.uint8``).

    Raises:
        ValueError: if the IDX magic number is not 2049 (label files).
    """
    print('Extracting', _UpperCAmelCase.name )
    with gzip.GzipFile(fileobj=_UpperCAmelCase ) as bytestream:
        # NOTE(review): `_readaa` is presumably the 32-bit header reader defined
        # above under an obfuscated name — confirm the module-level binding.
        magic = _readaa(bytestream )
        if magic != 2_049:
            raise ValueError(
                'Invalid magic number %d in MNIST label file: %s' % (magic, _UpperCAmelCase.name) )
        num_items = _readaa(bytestream )
        buf = bytestream.read(num_items )
        labels = numpy.frombuffer(buf, dtype=numpy.uint8 )
        if one_hot:
            # NOTE(review): `_dense_to_one_hot` should be the one-hot helper
            # defined above (obfuscated name here) — confirm the binding.
            return _dense_to_one_hot(labels, num_classes )
        return labels
class __A :
    """In-memory MNIST split (deprecated tensorflow.contrib-style _DataSet):
    holds image/label arrays and serves shuffled mini-batches.

    NOTE(review): obfuscation has broken this class — the ``__init__`` and
    ``next_batch`` signatures repeat a single parameter name (a SyntaxError as
    written), every local is bound to the same throwaway name, and the four
    properties all share the name ``lowercase__`` (later defs shadow earlier
    ones).  The comments below describe the apparent intent; confirm against
    the un-obfuscated tensorflow original before relying on any of it.
    """
    @deprecated(
        UpperCAmelCase_ , 'Please use alternatives such as official/mnist/_DataSet.py'
        ' from tensorflow/models.' , )
    def __init__( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str=False , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[str]=dtypes.floataa , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Optional[Any]=None , ):
        # Seed numpy's RNG from the graph/op-level seeds so shuffling is
        # reproducible.  NOTE(review): `seeda`/`seed` are unbound as written.
        lowerCAmelCase , lowerCAmelCase : int = random_seed.get_seed(UpperCAmelCase_ )
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seeda if seed is None else seeda )
        # Only uint8 (raw pixels) and float32 (rescaled) images are supported.
        lowerCAmelCase : List[str] = dtypes.as_dtype(UpperCAmelCase_ ).base_dtype
        if dtype not in (dtypes.uinta, dtypes.floataa):
            raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype )
        if fake_data:
            lowerCAmelCase : Dict = 10000
            lowerCAmelCase : Any = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            lowerCAmelCase : Optional[Any] = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                lowerCAmelCase : Union[str, Any] = images.reshape(
                    images.shape[0] , images.shape[1] * images.shape[2] )
            if dtype == dtypes.floataa:
                # Convert from [0, 255] -> [0.0, 1.0].
                lowerCAmelCase : Optional[int] = images.astype(numpy.floataa )
                lowerCAmelCase : Dict = numpy.multiply(UpperCAmelCase_ , 1.0 / 2_55.0 )
        lowerCAmelCase : List[str] = images
        lowerCAmelCase : List[str] = labels
        lowerCAmelCase : List[Any] = 0
        lowerCAmelCase : Optional[int] = 0
    @property
    def lowercase__ ( self : str ):
        """Image array for this split."""
        return self._images
    @property
    def lowercase__ ( self : Dict ):
        """Label array for this split."""
        return self._labels
    @property
    def lowercase__ ( self : List[Any] ):
        """Number of examples held by this split."""
        return self._num_examples
    @property
    def lowercase__ ( self : Any ):
        """How many full passes over the data have been served."""
        return self._epochs_completed
    def lowercase__ ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : List[str]=True ):
        """Serve the next mini-batch, reshuffling at each epoch boundary and
        stitching the epoch tail onto the next epoch's head when a batch
        straddles the boundary."""
        if fake_data:
            # Synthetic 28*28 image and constant label for smoke tests.
            lowerCAmelCase : Union[str, Any] = [1] * 784
            lowerCAmelCase : Dict = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(UpperCAmelCase_ )],
                [fake_label for _ in range(UpperCAmelCase_ )],
            )
        lowerCAmelCase : Union[str, Any] = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            lowerCAmelCase : Optional[int] = numpy.arange(self._num_examples )
            numpy.random.shuffle(UpperCAmelCase_ )
            lowerCAmelCase : List[Any] = self.images[perma]
            lowerCAmelCase : str = self.labels[perma]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            lowerCAmelCase : Tuple = self._num_examples - start
            lowerCAmelCase : Union[str, Any] = self._images[start : self._num_examples]
            lowerCAmelCase : Tuple = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                lowerCAmelCase : Dict = numpy.arange(self._num_examples )
                numpy.random.shuffle(UpperCAmelCase_ )
                lowerCAmelCase : List[Any] = self.images[perm]
                lowerCAmelCase : Optional[Any] = self.labels[perm]
            # Start next epoch
            lowerCAmelCase : Optional[Any] = 0
            lowerCAmelCase : Dict = batch_size - rest_num_examples
            lowerCAmelCase : int = self._index_in_epoch
            lowerCAmelCase : Union[str, Any] = self._images[start:end]
            lowerCAmelCase : int = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
                numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
            )
        else:
            self._index_in_epoch += batch_size
            lowerCAmelCase : Optional[Any] = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, 'Please write your own downloading logic.' )
def SCREAMING_SNAKE_CASE__ ( filename, work_directory, source_url ):
    """Download *filename* from *source_url* into *work_directory* unless it is
    already present, and return the local file path.

    Fixes the obfuscated version: all three parameters shared one name (a
    SyntaxError as written), so the directory/file roles had to be restored
    from the call sites (``_maybe_download(file, dir, url)``), and the
    decorator's first argument was an undefined name (should be ``None``).
    """
    if not gfile.Exists(work_directory ):
        gfile.MakeDirs(work_directory )
    filepath = os.path.join(work_directory, filename )
    if not gfile.Exists(filepath ):
        urllib.request.urlretrieve(source_url, filepath )  # noqa: S310
        with gfile.GFile(filepath ) as f:
            size = f.size()
        print('Successfully downloaded', filename, size, 'bytes.' )
    return filepath
# NOTE(review): obfuscation has broken this reader — the eight parameters all
# share one name (a SyntaxError as written; originally train_dir, fake_data,
# one_hot, dtype, reshape, validation_size, seed, source_url), every local is
# bound to a throwaway name, and `_DataSet` / `_Datasets` / `_maybe_download` /
# `_extract_images` / `_extract_labels` refer to the helpers above under their
# un-obfuscated names.  Confirm against the tensorflow original.
@deprecated(
    _UpperCAmelCase, 'Please use alternatives such as:' ' tensorflow_datasets.load(\'mnist\')' )
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=dtypes.floataa, _UpperCAmelCase=True, _UpperCAmelCase=5_000, _UpperCAmelCase=None, _UpperCAmelCase=DEFAULT_SOURCE_URL, ) -> Tuple:
    """Download (if needed) and load the MNIST train/validation/test splits,
    returning them bundled in a ``_Datasets`` namedtuple."""
    if fake_data:
        # Synthetic empty datasets for smoke testing.
        def fake():
            return _DataSet(
                [], [], fake_data=_UpperCAmelCase, one_hot=_UpperCAmelCase, dtype=_UpperCAmelCase, seed=_UpperCAmelCase )
        lowerCAmelCase : Tuple = fake()
        lowerCAmelCase : Optional[Any] = fake()
        lowerCAmelCase : List[Any] = fake()
        return _Datasets(train=_UpperCAmelCase, validation=_UpperCAmelCase, test=_UpperCAmelCase )
    if not source_url:  # empty string check
        lowerCAmelCase : Any = DEFAULT_SOURCE_URL
    # Canonical IDX archive names for the four MNIST files.
    lowerCAmelCase : Optional[Any] = 'train-images-idx3-ubyte.gz'
    lowerCAmelCase : Any = 'train-labels-idx1-ubyte.gz'
    lowerCAmelCase : int = 't10k-images-idx3-ubyte.gz'
    lowerCAmelCase : Union[str, Any] = 't10k-labels-idx1-ubyte.gz'
    lowerCAmelCase : str = _maybe_download(
        _UpperCAmelCase, _UpperCAmelCase, source_url + train_images_file )
    with gfile.Open(_UpperCAmelCase, 'rb' ) as f:
        lowerCAmelCase : Any = _extract_images(_UpperCAmelCase )
    lowerCAmelCase : Tuple = _maybe_download(
        _UpperCAmelCase, _UpperCAmelCase, source_url + train_labels_file )
    with gfile.Open(_UpperCAmelCase, 'rb' ) as f:
        lowerCAmelCase : int = _extract_labels(_UpperCAmelCase, one_hot=_UpperCAmelCase )
    lowerCAmelCase : Optional[Any] = _maybe_download(
        _UpperCAmelCase, _UpperCAmelCase, source_url + test_images_file )
    with gfile.Open(_UpperCAmelCase, 'rb' ) as f:
        lowerCAmelCase : List[Any] = _extract_images(_UpperCAmelCase )
    lowerCAmelCase : Any = _maybe_download(
        _UpperCAmelCase, _UpperCAmelCase, source_url + test_labels_file )
    with gfile.Open(_UpperCAmelCase, 'rb' ) as f:
        lowerCAmelCase : List[str] = _extract_labels(_UpperCAmelCase, one_hot=_UpperCAmelCase )
    # Carve the validation split off the front of the training data.
    if not 0 <= validation_size <= len(_UpperCAmelCase ):
        lowerCAmelCase : str = (
            'Validation size should be between 0 and '
            f"{len(_UpperCAmelCase )}. Received: {validation_size}."
        )
        raise ValueError(_UpperCAmelCase )
    lowerCAmelCase : str = train_images[:validation_size]
    lowerCAmelCase : Dict = train_labels[:validation_size]
    lowerCAmelCase : List[str] = train_images[validation_size:]
    lowerCAmelCase : str = train_labels[validation_size:]
    lowerCAmelCase : str = {'dtype': dtype, 'reshape': reshape, 'seed': seed}
    lowerCAmelCase : int = _DataSet(_UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase )
    lowerCAmelCase : Union[str, Any] = _DataSet(_UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase )
    lowerCAmelCase : Union[str, Any] = _DataSet(_UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase )
    return _Datasets(train=_UpperCAmelCase, validation=_UpperCAmelCase, test=_UpperCAmelCase )
| 138 | 0 |
'''simple docstring'''
def __magic_name__ ( __UpperCAmelCase = 1000 ) -> Dict:
'''simple docstring'''
snake_case_ = 2**power
snake_case_ = str(_A )
snake_case_ = list(_A )
snake_case_ = 0
for i in list_num:
sum_of_num += int(_A )
return sum_of_num
# Script entry point: read the exponent from stdin and report the digit sum.
# NOTE(review): `power`, `solution` and `result` are not defined under these
# names in this file (the input is bound to `a` and the function above is
# `__magic_name__`), so this block raises NameError as written — confirm the
# intended names against the un-obfuscated original.
if __name__ == "__main__":
    a : List[str] = int(input('Enter the power of 2: ').strip())
    print('2 ^ ', power, ' = ', 2**power)
    a : Optional[int] = solution(power)
    print('Sum of the digits is: ', result)
| 353 |
'''simple docstring'''
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
# NOTE(review): every constant below is bound to the single obfuscated name
# `a`, so each assignment shadows the previous one, and the later references
# to `PATH_TO_TRANSFORMERS`, `spec`, `transformers`, `CONFIG_MAPPING`,
# `_re_checkpoint` and the ignore set are undefined under those names here —
# confirm the intended distinct names against the original script.
# Repo-relative path of the transformers package sources.
a : Union[str, Any] = 'src/transformers'
# Load the in-repo transformers package (not any installed copy).
a : Any = importlib.util.spec_from_file_location(
    'transformers',
    os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
a : int = spec.loader.load_module()
a : Dict = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
a : str = re.compile('\[(.+?)\]\((https://huggingface\.co/.+?)\)')
# Config classes exempt from the docstring-checkpoint requirement.
a : str = {
    'CLIPConfigMixin',
    'DecisionTransformerConfigMixin',
    'EncoderDecoderConfigMixin',
    'RagConfigMixin',
    'SpeechEncoderDecoderConfigMixin',
    'VisionEncoderDecoderConfigMixin',
    'VisionTextDualEncoderConfigMixin',
}
def __magic_name__ ( ) -> None:
    """Check that every config class's docstring links at least one valid
    checkpoint (name and URL agreeing), raising ValueError listing offenders.

    Fixes the obfuscated body: the ``(name, link)`` tuple was unpacked into a
    single repeated name so ``ckpt_name``/``ckpt_link`` were undefined, and
    ``inspect.getsource`` / ``append`` were called on undefined names.

    NOTE(review): ``CONFIG_MAPPING``, ``_re_checkpoint`` and the ignore set are
    bound to the obfuscated name ``a`` earlier in this file — confirm the
    module-level bindings before running.
    """
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values() ):
        checkpoint_found = False
        # source code of `config_class`
        config_source = inspect.getsource(config_class )
        checkpoints = _re_checkpoint.findall(config_source )
        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint
            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = F"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break
        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )
    if len(configs_without_checkpoint ) > 0:
        message = '''\n'''.join(sorted(configs_without_checkpoint ) )
        raise ValueError(F"The following configurations don't contain any valid checkpoint:\n{message}" )
# Run the docstring-checkpoint check when invoked as a script.
# NOTE(review): `check_config_docstrings_have_checkpoints` is not defined
# under that name in this file (the function above is `__magic_name__`) —
# confirm the intended binding.
if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
| 72 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import scaffolding for the EfficientNet subpackage: heavy submodules
# are only imported when their names are first accessed.
# NOTE(review): every structure below is bound to the single obfuscated name
# `__A`, so each assignment shadows the previous one, and `_import_structure`
# referenced in the _LazyModule call is never defined under that name here —
# originally these were distinct additions to one `_import_structure` dict.
# Confirm against the un-obfuscated original.
__A = {
    "configuration_efficientnet": [
        "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientNetConfig",
        "EfficientNetOnnxConfig",
    ]
}
# Image processor is only exposed when vision extras are installed.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A = ["EfficientNetImageProcessor"]
# Modeling classes are only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]
# Under static type checking, import everything eagerly so names resolve.
if TYPE_CHECKING:
    from .configuration_efficientnet import (
        EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EfficientNetConfig,
        EfficientNetOnnxConfig,
    )
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientnet import EfficientNetImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientnet import (
            EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientNetForImageClassification,
            EfficientNetModel,
            EfficientNetPreTrainedModel,
        )
else:
    # At runtime, replace this module with a lazy proxy.
    import sys
    __A = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 90 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
# NOTE(review): the logger, the fairseq->HF key mapping and the top-level key
# list below are all bound to the single obfuscated name `__A`, so each
# assignment shadows the previous one; originally these were distinct names
# (presumably `logger`, `MAPPING`, `TOP_LEVEL_KEYS`) — confirm.
__A = logging.get_logger(__name__)
# Maps fairseq wav2vec2 state-dict key fragments to their HF counterparts;
# `*` is a placeholder for the encoder layer index.
__A = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
# Mapped keys that live at the top level of the HF model (no layer index).
__A = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
# NOTE(review): obfuscation has broken this function — the five parameters
# (presumably key, value, full_name, weight_type, hf_model) share one name,
# which is a SyntaxError as written; every local is bound to the throwaway
# `__lowerCamelCase`, so the per-weight-type branches no longer write into
# `hf_pointer.{weight,weight_g,weight_v,bias}.data` as the original did, and
# `key`/`value`/`hf_pointer`/`hf_shape`/`full_name` are unbound.  Confirm
# against the un-obfuscated conversion script before restoring.
def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] ) -> Tuple:
    """Walk an attribute path on the HF model and copy a fairseq tensor into the
    matching weight/bias slot, asserting the shapes agree first."""
    # Descend to the target submodule/parameter by dotted attribute path.
    for attribute in key.split('.' ):
        __lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ )
    if weight_type is not None:
        __lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ ).shape
    else:
        __lowerCamelCase = hf_pointer.shape
    assert hf_shape == value.shape, (
        F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        F""" {value.shape} for {full_name}"""
    )
    # Dispatch on which tensor slot of the module is being filled.
    if weight_type == "weight":
        __lowerCamelCase = value
    elif weight_type == "weight_g":
        __lowerCamelCase = value
    elif weight_type == "weight_v":
        __lowerCamelCase = value
    elif weight_type == "bias":
        __lowerCamelCase = value
    else:
        __lowerCamelCase = value
    logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
# NOTE(review): the two parameters (presumably fairseq_model, hf_model) share
# one obfuscated name — a SyntaxError as written — and locals such as
# `fairseq_dict`, `name`, `is_used`, `mapped_key`, `unused_weights` are bound
# to the throwaway `__lowerCamelCase`; confirm against the original script.
def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple ) -> Optional[Any]:
    """Iterate the fairseq state dict and route each tensor into the HF model:
    conv feature-extractor weights, adapter weights, or MAPPING-translated
    encoder weights; log anything left unused."""
    __lowerCamelCase = []
    __lowerCamelCase = fairseq_model.state_dict()
    __lowerCamelCase = hf_model.feature_extractor
    __lowerCamelCase = hf_model.adapter
    for name, value in fairseq_dict.items():
        __lowerCamelCase = False
        if "conv_layers" in name:
            # Convolutional feature extractor gets its own loader.
            load_conv_layer(
                UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hf_model.config.feat_extract_norm == 'group' , )
            __lowerCamelCase = True
        elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.'] ):
            # Adapter / projection weights get their own loader.
            load_adapter(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            __lowerCamelCase = True
        else:
            # Translate the fairseq key through MAPPING, filling in the layer
            # index where the mapped key contains `*`.
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    __lowerCamelCase = True
                    if "*" in mapped_key:
                        __lowerCamelCase = name.split(UpperCamelCase__ )[0].split('.' )[-2]
                        __lowerCamelCase = mapped_key.replace('*' , UpperCamelCase__ )
                    # Classify which tensor slot this entry fills.
                    if "weight_g" in name:
                        __lowerCamelCase = 'weight_g'
                    elif "weight_v" in name:
                        __lowerCamelCase = 'weight_v'
                    elif "bias" in name:
                        __lowerCamelCase = 'bias'
                    elif "weight" in name:
                        __lowerCamelCase = 'weight'
                    else:
                        __lowerCamelCase = None
                    set_recursively(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
                continue
        if not is_used:
            unused_weights.append(UpperCamelCase__ )
    logger.warning(F"""Unused weights: {unused_weights}""" )
# NOTE(review): the five parameters (presumably full_name, value,
# feature_extractor, unused_weights, use_group_norm) share one obfuscated
# name — a SyntaxError as written — and locals like `name`, `layer_id`,
# `type_id` are bound to the throwaway `__lowerCamelCase`; confirm against
# the original conversion script.
def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : str , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple ) -> int:
    """Copy one fairseq `conv_layers.*` tensor into the HF feature extractor:
    type_id 0 entries are conv weights/biases, type_id 2 entries are layer/group
    norms; anything else is recorded as unused."""
    __lowerCamelCase = full_name.split('conv_layers.' )[-1]
    __lowerCamelCase = name.split('.' )
    __lowerCamelCase = int(items[0] )
    __lowerCamelCase = int(items[1] )
    if type_id == 0:
        # Convolution weight or bias of layer `layer_id`.
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            __lowerCamelCase = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            __lowerCamelCase = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        # Normalization parameters: layer norm everywhere, or group norm on
        # layer 0 only when the config selects group normalization.
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            __lowerCamelCase = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            __lowerCamelCase = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(UpperCamelCase__ )
# NOTE(review): the four parameters (presumably full_name, value, adapter,
# unused_weights) share one obfuscated name — a SyntaxError as written — and
# locals like `name`, `items`, `layer_id` are bound to the throwaway
# `__lowerCamelCase`; confirm against the original conversion script.
def lowerCamelCase_ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : int ) -> Union[str, Any]:
    """Copy one fairseq adapter tensor into the HF adapter: projection
    layer-norm, projection layer, or a numbered adapter conv layer; anything
    else is recorded as unused."""
    __lowerCamelCase = full_name.split('adaptor.' )[-1]
    __lowerCamelCase = name.split('.' )
    # A digit in the second path segment identifies a numbered adapter layer.
    if items[1].isdigit():
        __lowerCamelCase = int(items[1] )
    else:
        __lowerCamelCase = None
    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), F"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."""
                __lowerCamelCase = value
                logger.info(F"""Adapter proj layer norm bias was initialized from {full_name}.""" )
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), F"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."""
                __lowerCamelCase = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), F"""{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."""
                __lowerCamelCase = value
                logger.info(F"""Adapter proj layer bias was initialized from {full_name}.""" )
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), F"""{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."""
                __lowerCamelCase = value
                logger.info(F"""Adapter proj layer weight was initialized from {full_name}.""" )
    elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
        # Numbered adapter convolution layer.
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), F"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."""
            __lowerCamelCase = value
            logger.info(F"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), F"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."""
            __lowerCamelCase = value
            logger.info(F"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
    else:
        unused_weights.append(UpperCamelCase__ )
def lowerCamelCase_ ( UpperCamelCase__ : Tuple ) -> Tuple:
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase = emb.weight.shape
__lowerCamelCase = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
__lowerCamelCase = emb.weight.data
return lin_layer
@torch.no_grad()
def lowerCamelCase_ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] , ) -> str:
"""simple docstring"""
__lowerCamelCase = WavaVecaConfig.from_pretrained(
UpperCamelCase__ , add_adapter=UpperCamelCase__ , adapter_stride=UpperCamelCase__ , adapter_kernel_size=UpperCamelCase__ , use_auth_token=UpperCamelCase__ , output_hidden_size=UpperCamelCase__ , )
__lowerCamelCase = MBartConfig.from_pretrained(UpperCamelCase__ )
# load model
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
'config_yaml': config_yaml_path,
'data': '/'.join(dict_path.split('/' )[:-1] ),
'w2v_path': checkpoint_path,
'load_pretrained_decoder_from': None,
} , )
__lowerCamelCase = model[0].eval()
# load feature extractor
__lowerCamelCase = WavaVecaFeatureExtractor.from_pretrained(UpperCamelCase__ , use_auth_token=UpperCamelCase__ )
# set weights for wav2vec2 encoder
__lowerCamelCase = WavaVecaModel(UpperCamelCase__ )
recursively_load_weights_wavaveca(model.encoder , UpperCamelCase__ )
# load decoder weights
__lowerCamelCase = MBartForCausalLM(UpperCamelCase__ )
__lowerCamelCase , __lowerCamelCase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=UpperCamelCase__ )
logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
__lowerCamelCase = SpeechEncoderDecoderModel(encoder=UpperCamelCase__ , decoder=UpperCamelCase__ )
__lowerCamelCase = False
__lowerCamelCase = MBartaaTokenizer(UpperCamelCase__ )
tokenizer.save_pretrained(UpperCamelCase__ )
__lowerCamelCase = hf_wavavec.config.to_dict()
__lowerCamelCase = tokenizer.pad_token_id
__lowerCamelCase = tokenizer.bos_token_id
__lowerCamelCase = tokenizer.eos_token_id
__lowerCamelCase = 'mbart50'
__lowerCamelCase = 'wav2vec2'
__lowerCamelCase = tokenizer.eos_token_id
__lowerCamelCase = 25_0004
__lowerCamelCase = tokenizer.eos_token_id
__lowerCamelCase = SpeechEncoderDecoderConfig.from_dict(UpperCamelCase__ )
hf_wavavec.save_pretrained(UpperCamelCase__ )
feature_extractor.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=10_24, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=25_00_04, type=int, help="`decoder_start_token_id` of model config")
__A = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 90 | 1 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=30 , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=32 , _lowerCamelCase=2 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=10 , _lowerCamelCase=0.02 , _lowerCamelCase=3 , _lowerCamelCase=0.6 , _lowerCamelCase=None , ) -> Dict:
A_ : Tuple = parent
A_ : Tuple = batch_size
A_ : int = image_size
A_ : Optional[Any] = patch_size
A_ : int = num_channels
A_ : Dict = is_training
A_ : Tuple = use_labels
A_ : List[str] = hidden_size
A_ : Dict = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : int = intermediate_size
A_ : int = hidden_act
A_ : Optional[Any] = hidden_dropout_prob
A_ : List[Any] = attention_probs_dropout_prob
A_ : Optional[Any] = type_sequence_label_size
A_ : Optional[Any] = initializer_range
A_ : List[str] = mask_ratio
A_ : Union[str, Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
A_ : Union[str, Any] = (image_size // patch_size) ** 2
A_ : Optional[int] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Tuple = None
if self.use_labels:
A_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self ) -> List[str]:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_a , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
A_ : List[str] = TFViTMAEModel(config=_a )
A_ : List[Any] = model(_a , training=_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
A_ : Dict = TFViTMAEForPreTraining(_a )
A_ : Any = model(_a , training=_a )
# expected sequence length = num_patches
A_ : Tuple = (self.image_size // self.patch_size) ** 2
A_ : Dict = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
A_ : Dict = 1
A_ : int = TFViTMAEForPreTraining(_a )
A_ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ : Any = model(_a , training=_a )
A_ : Any = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : Optional[int] = self.prepare_config_and_inputs()
(A_) : List[Any] = config_and_inputs
A_ : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class _lowerCAmelCase ( __A, __A, unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
lowerCamelCase = {'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {}
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def UpperCAmelCase_ ( self ) -> Any:
A_ : Optional[int] = TFViTMAEModelTester(self )
A_ : List[str] = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def UpperCAmelCase_ ( self ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
pass
def UpperCAmelCase_ ( self ) -> str:
A_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Dict = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
A_ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , tf.keras.layers.Layer ) )
def UpperCAmelCase_ ( self ) -> str:
A_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Any = model_class(_a )
A_ : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : int = [*signature.parameters.keys()]
A_ : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _a )
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def UpperCAmelCase_ ( self ) -> Dict:
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_a )
def UpperCAmelCase_ ( self ) -> Any:
# make the mask reproducible
np.random.seed(2 )
A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[str] = int((config.image_size // config.patch_size) ** 2 )
A_ : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
A_ : str = model_class(_a )
A_ : List[Any] = self._prepare_for_class(_a , _a )
A_ : Union[str, Any] = model(_a , noise=_a )
A_ : int = copy.deepcopy(self._prepare_for_class(_a , _a ) )
A_ : str = model(**_a , noise=_a )
A_ : Optional[int] = outputs_dict[0].numpy()
A_ : Optional[int] = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
# make the mask reproducible
np.random.seed(2 )
A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[Any] = int((config.image_size // config.patch_size) ** 2 )
A_ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(_lowerCamelCase ):
A_ : Union[str, Any] = {}
for k, v in inputs_dict.items():
if tf.is_tensor(_a ):
A_ : List[str] = v.numpy()
else:
A_ : str = np.array(_a )
return inputs_np_dict
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(_a )
A_ : int = self._prepare_for_class(_a , _a )
A_ : Optional[Any] = prepare_numpy_arrays(_a )
A_ : Union[str, Any] = model(_a , noise=_a )
A_ : int = model(**_a , noise=_a )
self.assert_outputs_same(_a , _a )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
# make masks reproducible
np.random.seed(2 )
A_ : Union[str, Any] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
A_ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
A_ : Dict = tf.constant(_a )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
A_ : List[str] = tf_noise
super().check_pt_tf_models(_a , _a , _a )
def UpperCAmelCase_ ( self ) -> str:
# make mask reproducible
np.random.seed(2 )
A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Any = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(_a )
if module_member_name.endswith("""MainLayer""" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )]
for module_member in (getattr(_a , _a ),)
if isinstance(_a , _a )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(_a , """_keras_serializable""" , _a )
}
A_ : Optional[int] = int((config.image_size // config.patch_size) ** 2 )
A_ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
A_ : Optional[int] = tf.convert_to_tensor(_a )
inputs_dict.update({"""noise""": noise} )
for main_layer_class in tf_main_layer_classes:
A_ : Optional[int] = main_layer_class(_a )
A_ : Optional[int] = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
A_ : Any = tf.keras.Model(_a , outputs=main_layer(_a ) )
A_ : str = model(_a )
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : Dict = os.path.join(_a , """keras_model.h5""" )
model.save(_a )
A_ : Optional[int] = tf.keras.models.load_model(
_a , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(_a , tf.keras.Model )
A_ : Optional[Any] = model(_a )
self.assert_outputs_same(_a , _a )
@slow
def UpperCAmelCase_ ( self ) -> Any:
# make mask reproducible
np.random.seed(2 )
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[Any] = int((config.image_size // config.patch_size) ** 2 )
A_ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
A_ : int = model_class(_a )
A_ : Tuple = self._prepare_for_class(_a , _a )
A_ : List[Any] = model(_a , noise=_a )
if model_class.__name__ == "TFViTMAEModel":
A_ : Optional[int] = outputs.last_hidden_state.numpy()
A_ : int = 0
else:
A_ : List[Any] = outputs.logits.numpy()
A_ : int = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_a , saved_model=_a )
A_ : List[str] = model_class.from_pretrained(_a )
A_ : Optional[Any] = model(_a , noise=_a )
if model_class.__name__ == "TFViTMAEModel":
A_ : int = after_outputs["last_hidden_state"].numpy()
A_ : str = 0
else:
A_ : Any = after_outputs["logits"].numpy()
A_ : List[str] = 0
A_ : Any = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_a , 1e-5 )
def UpperCAmelCase_ ( self ) -> Optional[int]:
# make mask reproducible
np.random.seed(2 )
A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
A_ : str = int((config.image_size // config.patch_size) ** 2 )
A_ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
A_ : List[Any] = model_class(_a )
A_ : List[str] = self._prepare_for_class(_a , _a )
A_ : Any = model(_a , noise=_a )
A_ : Optional[Any] = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(_a )
A_ : Optional[int] = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
A_ : Optional[Any] = model_class.from_config(model.config )
A_ : Tuple = new_model(_a ) # Build model
new_model.set_weights(model.get_weights() )
A_ : List[str] = new_model(_a , noise=_a )
self.assert_outputs_same(_a , _a )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.""" )
def UpperCAmelCase_ ( self ) -> int:
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def UpperCAmelCase_ ( self ) -> Tuple:
pass
@slow
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Dict = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(_a )
def UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
A_ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ ( self ) -> List[Any]:
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def UpperCAmelCase_ ( self ) -> List[str]:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
A_ : Dict = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" )
A_ : str = self.default_image_processor
A_ : int = prepare_img()
A_ : Union[str, Any] = image_processor(images=_a , return_tensors="""tf""" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
A_ : Optional[int] = ViTMAEConfig()
A_ : List[str] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
A_ : Tuple = np.random.uniform(size=(1, num_patches) )
# forward pass
A_ : Tuple = model(**_a , noise=_a )
# verify the logits
A_ : str = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , _a )
A_ : Union[str, Any] = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , _a , atol=1e-4 )
| 369 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowerCamelCase ) -> Optional[Any]:
A_ : Any = data
A_ : Node | None = None
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self ) -> List[str]:
A_ : Tuple = None
A_ : str = None
def __iter__( self ) -> Iterator[Any]:
A_ : Dict = self.head
while self.head:
yield node.data
A_ : Optional[Any] = node.next
if node == self.head:
break
def __len__( self ) -> int:
return sum(1 for _ in self )
def __repr__( self ) -> str:
return "->".join(str(_lowerCamelCase ) for item in iter(self ) )
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> None:
self.insert_nth(len(self ) , _lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> None:
self.insert_nth(0 , _lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase ) -> None:
if index < 0 or index > len(self ):
raise IndexError("""list index out of range.""" )
A_ : Optional[int] = Node(_lowerCamelCase )
if self.head is None:
A_ : str = new_node # first node points itself
A_ : Union[str, Any] = new_node
elif index == 0: # insert at head
A_ : List[Any] = self.head
A_ : List[Any] = new_node
else:
A_ : List[str] = self.head
for _ in range(index - 1 ):
A_ : Optional[int] = temp.next
A_ : Tuple = temp.next
A_ : str = new_node
if index == len(self ) - 1: # insert at tail
A_ : Optional[int] = new_node
def UpperCAmelCase_ ( self ) -> List[Any]:
return self.delete_nth(0 )
def UpperCAmelCase_ ( self ) -> Any:
return self.delete_nth(len(self ) - 1 )
def UpperCAmelCase_ ( self , _lowerCamelCase = 0 ) -> Any:
if not 0 <= index < len(self ):
raise IndexError("""list index out of range.""" )
A_ : int = self.head
if self.head == self.tail: # just one node
A_ : int = None
elif index == 0: # delete head node
A_ : Union[str, Any] = self.tail.next.next
A_ : Tuple = self.head.next
else:
A_ : Optional[int] = self.head
for _ in range(index - 1 ):
A_ : Tuple = temp.next
A_ : Any = temp.next
A_ : Tuple = temp.next.next
if index == len(self ) - 1: # delete at tail
A_ : List[str] = temp
return delete_node.data
def UpperCAmelCase_ ( self ) -> bool:
return len(self ) == 0
def UpperCAmelCase ( ) -> None:
"""simple docstring"""
A_ : Any = CircularLinkedList()
assert len(a_ ) == 0
assert circular_linked_list.is_empty() is True
assert str(a_ ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(a_ ) == i
circular_linked_list.insert_nth(a_ , i + 1 )
assert str(a_ ) == "->".join(str(a_ ) for i in range(1 , 6 ) )
circular_linked_list.insert_tail(6 )
assert str(a_ ) == "->".join(str(a_ ) for i in range(1 , 7 ) )
circular_linked_list.insert_head(0 )
assert str(a_ ) == "->".join(str(a_ ) for i in range(0 , 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(a_ ) == "->".join(str(a_ ) for i in range(1 , 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 , 3 )
assert str(a_ ) == "->".join(str(a_ ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 164 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
_lowerCamelCase : Any = {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : List[str] = "speech_to_text_2"
_UpperCAmelCase : Optional[Any] = ["past_key_values"]
_UpperCAmelCase : Optional[int] = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Optional[int] , lowercase : Tuple=10_000 , lowercase : Tuple=6 , lowercase : Any=2_048 , lowercase : Optional[Any]=4 , lowercase : str=0.0 , lowercase : Optional[int]=True , lowercase : List[Any]="relu" , lowercase : str=256 , lowercase : Tuple=0.1 , lowercase : List[Any]=0.0 , lowercase : int=0.0 , lowercase : Dict=0.02 , lowercase : Optional[int]=2 , lowercase : Any=True , lowercase : Dict=1 , lowercase : List[Any]=0 , lowercase : Any=2 , lowercase : List[str]=1_024 , **lowercase : List[str] , ):
'''simple docstring'''
_snake_case = vocab_size
_snake_case = d_model
_snake_case = decoder_ffn_dim
_snake_case = decoder_layers
_snake_case = decoder_attention_heads
_snake_case = dropout
_snake_case = attention_dropout
_snake_case = activation_dropout
_snake_case = activation_function
_snake_case = init_std
_snake_case = decoder_layerdrop
_snake_case = use_cache
_snake_case = decoder_layers
_snake_case = scale_embedding # scale factor will be sqrt(d_model) if True
_snake_case = max_target_positions
super().__init__(
pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , decoder_start_token_id=lowercase , **lowercase , ) | 282 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def a_ ( __lowercase : Dict , __lowercase : int , __lowercase : Optional[Any]=None ) -> Any:
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match'''
_snake_case = nn.Parameter(__lowercase )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match'''
_snake_case = nn.Parameter(__lowercase )
def a_ ( __lowercase : Any , __lowercase : Dict , __lowercase : Union[str, Any] ) -> Optional[Any]:
# set torch weights for 1-to-1 comparison
_snake_case = np.asarray(weights[0] )
_snake_case = np.asarray(weights[1] )
_snake_case = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , )
set_param(
torch_layer.output.dense , torch.tensor(__lowercase ).view(-1 , __lowercase ).contiguous().transpose(0 , 1 ) , )
def a_ ( __lowercase : str , __lowercase : Tuple , __lowercase : Any ) -> Optional[Any]:
# set torch weights for 1-to-1 comparison
_snake_case = np.asarray(weights[0] )
_snake_case = np.asarray(weights[1] )
_snake_case = np.asarray(weights[2] )
_snake_case = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , )
set_param(
torch_layer.output.dense , torch.tensor(__lowercase ).view(-1 , __lowercase ).contiguous().transpose(0 , 1 ) , )
def a_ ( __lowercase : Dict , __lowercase : List[str] , __lowercase : Union[str, Any] ) -> Optional[Any]:
# layernorm 1
_snake_case = weights[0][0][0]
_snake_case = np.asarray(layer_norm_a[0] )
_snake_case = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(__lowercase ) , torch.tensor(__lowercase ) , )
# lsh weights + output
_snake_case = weights[0][1]
if len(__lowercase ) < 4:
set_layer_weights_in_torch_lsh(__lowercase , torch_block.attention , __lowercase )
else:
set_layer_weights_in_torch_local(__lowercase , torch_block.attention , __lowercase )
# intermediate weighs
_snake_case = weights[2][0][1][2]
# Chunked Feed Forward
if len(__lowercase ) == 4:
_snake_case = intermediate_weights[2]
# layernorm 2
_snake_case = np.asarray(intermediate_weights[0][0] )
_snake_case = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(__lowercase ) , torch.tensor(__lowercase ) , )
# intermediate dense
_snake_case = np.asarray(intermediate_weights[1][0] )
_snake_case = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(__lowercase ).transpose(0 , 1 ).contiguous() , torch.tensor(__lowercase ) , )
# intermediate out
_snake_case = np.asarray(intermediate_weights[4][0] )
_snake_case = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(__lowercase ).transpose(0 , 1 ).contiguous() , torch.tensor(__lowercase ) , )
def a_ ( __lowercase : Tuple , __lowercase : Tuple , __lowercase : Dict ) -> Optional[int]:
# reformer model
_snake_case = torch_model.reformer
# word embeds
_snake_case = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(__lowercase ) , )
if isinstance(weights[3] , __lowercase ):
_snake_case = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
_snake_case = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f'''{position_embeddings[emb_idx]} emb does not match'''
_snake_case = nn.Parameter(torch.tensor(__lowercase ) )
_snake_case = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
__lowercase ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
_snake_case = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(__lowercase , __lowercase , __lowercase )
# output layer norm
_snake_case = np.asarray(weights[7][0] )
_snake_case = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(__lowercase ) , torch.tensor(__lowercase ) , )
# output embeddings
_snake_case = np.asarray(weights[9][0] )
_snake_case = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(__lowercase ).transpose(0 , 1 ).contiguous() , torch.tensor(__lowercase ) , )
def a_ ( __lowercase : Optional[Any] , __lowercase : Dict , __lowercase : List[Any] ) -> Optional[int]:
# Initialise PyTorch model
_snake_case = ReformerConfig.from_json_file(__lowercase )
print(f'''Building PyTorch model from configuration: {config}''' )
_snake_case = ReformerModelWithLMHead(__lowercase )
with open(__lowercase , 'rb' ) as f:
_snake_case = pickle.load(__lowercase )['weights']
set_model_weights_in_torch(__lowercase , __lowercase , config.hidden_size )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , __lowercase )
if __name__ == "__main__":
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowerCamelCase : List[Any] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path) | 282 | 1 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : List[str] = filter(lambda lowerCamelCase__ : p.requires_grad , model.parameters() )
lowercase__ : Tuple = sum([np.prod(p.size() ) for p in model_parameters] )
return params
lowerCAmelCase__ = logging.getLogger(__name__)
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
if metric == "rouge2":
lowercase__ : List[Any] = "{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
lowercase__ : int = "{val_avg_bleu:.4f}-{step_count}"
elif metric == "em":
lowercase__ : Optional[int] = "{val_avg_em:.4f}-{step_count}"
elif metric == "loss":
lowercase__ : Optional[Any] = "{val_avg_loss:.4f}-{step_count}"
else:
raise NotImplementedError(
F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
" function." )
lowercase__ : Tuple = ModelCheckpoint(
dirpath=__lowerCamelCase , filename=__lowerCamelCase , monitor=F"""val_{metric}""" , mode="max" , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
return EarlyStopping(
monitor=F"""val_{metric}""" , mode="min" if "loss" in metric else "max" , patience=__lowerCamelCase , verbose=__lowerCamelCase , )
class snake_case__(pl.Callback):
    """Logging callback for seq2seq fine-tuning.

    Logs per-group learning rates, parameter counts, and writes metric /
    generation files at test time.

    NOTE(review): the mangled original named every hook ``snake_case`` (so
    they overrode each other) and referenced undefined names; the hook names
    below are restored to the pl.Callback contract that the bodies clearly
    target — confirm against the original upstream file.
    """

    def on_batch_end(self, trainer, pl_module):
        # Log the current learning rate of every optimizer param group.
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True) -> None:
        """Dump callback metrics (and optionally generations) for *type_path*."""
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 368 |
def infix_2_postfix(infix: str) -> str:
    """Convert an infix expression string to postfix (RPN).

    Prints each conversion step in a small table for teaching purposes and
    returns the postfix string. Operands must be single letters/digits.
    """
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                # stop at "(" — it has no priority entry and must stay until ")"
                while len(stack) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix: str) -> str:
    """Convert an infix expression to prefix notation.

    Reverses the input, swaps parentheses, converts to postfix, then
    reverses the result.
    """
    reversed_infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(reversed_infix)):
        if reversed_infix[i] == "(":
            reversed_infix[i] = ")"  # change "(" to ")"
        elif reversed_infix[i] == ")":
            reversed_infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(reversed_infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 121 | 0 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Return pi to *precision* significant digits via the Chudnovsky algorithm.

    Raises:
        TypeError: if *precision* is not an int.
        ValueError: if *precision* < 1.
    """
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision  # set Decimal working precision
    num_iterations = ceil(precision / 14)  # each series term adds ~14 digits
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    # drop the last (rounding-affected) digit
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
| 88 |
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features):
    """Return a parquet row-group size suited to *features*, or None.

    Media-heavy feature types (Image, Audio, binary Value) get smaller row
    groups so row-group-level random access stays cheap; for plain features
    None is returned and the writer's default is used.
    """
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size
class ParquetDatasetReader(AbstractDatasetReader):
    """Read parquet files into a Dataset (or IterableDataset when streaming)."""

    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # Normalize to {split: paths} so the builder always receives a mapping.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        module_hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            hash=module_hash,
            **kwargs,
        )

    def read(self):
        """Download/prepare if needed and return the dataset."""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class ParquetDatasetWriter:
    """Write a Dataset to a parquet file path or an open binary file object."""

    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        # Fall back to a feature-dependent row-group size (None for plain features).
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        """Write the dataset; return the number of bytes written."""
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Write to an already-open binary handle; caller owns open/close of the handle."""
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
| 260 | 0 |
'''simple docstring'''
def lowerCAmelCase__(input_string: str) -> str:
    """Return the longest palindromic substring of *input_string*.

    Uses Manacher's algorithm on a "|"-interleaved copy of the input so
    even-length palindromes also have a center character. Runs in O(n).
    """
    if not input_string:
        # guard: the algorithm below indexes input_string[-1] unconditionally
        return ""

    max_length = 0

    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for ch in input_string[: len(input_string) - 1]:
        new_input_string += ch + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for _ in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this string is ending after the previously explored end (that is r) ?
        # if yes the update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for ch in s:
        if ch != "|":
            output_string += ch

    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 371 |
'''simple docstring'''
from __future__ import annotations
class Matrix:
    """A rectangular matrix of ints/floats.

    Supports equality, addition, subtraction, scalar and matrix
    multiplication, integer powers, determinants, minors/cofactors,
    adjugates and inverses.
    """

    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns(self) -> list[list[int]]:
        """Return the matrix columns as a list of lists."""
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        """(rows, columns) of the matrix."""
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        """Return the identity matrix of the same order."""
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        """Determinant by cofactor expansion along the first row (0 if not square)."""
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        # (sic) spelling kept: a matrix is invertible iff its determinant is non-zero
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        """Determinant of the submatrix obtained by deleting *row* and *column*."""
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        """Transpose of the cofactor matrix."""
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    def add_row(self, row: list[int], position: int | None = None) -> None:
        """Append *row*, or insert it at *position* when given."""
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        """Append *column*, or insert it at *position* when given."""
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            # NOTE(review): int() truncation kept from the original scalar path.
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 227 | 0 |
"""simple docstring"""
import torch
from transformers import AutoModel
class SCREAMING_SNAKE_CASE__(torch.nn.Module):
    """Few-shot NER span scorer on top of a pretrained BERT encoder.

    forward() scores, per query, the probability of each query token being a
    span start/end by attending to the support sentences' start/end marker
    token embeddings.

    NOTE(review): the mangled original dropped every ``self.`` assignment and
    gave all helper methods one colliding name; attribute/method names below
    are restored from the internal call sites (``self.bert``, ``self.cos``,
    ``self.softmax``, ``self.BERT``) — confirm against upstream.
    """

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        """Encode tokenized inputs and return the last hidden states."""
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        # Sum token embeddings along the token axis, keeping dims for broadcasting.
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        # Temperature-scaled cosine-similarity attention over supports.
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Return (p_starts, p_ends): per-query token start/end probabilities."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        # These bookkeeping entries must not be fed to the encoder.
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
| 155 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
a = {
'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ['Speech2TextTokenizer']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ['Speech2TextFeatureExtractor']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 155 | 1 |
"""Lazy import structure for the GPT-SW3 tokenizer subpackage."""

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


# Maps submodule name -> public names; only populated when sentencepiece exists.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 361 |
'''simple docstring'''
from __future__ import annotations
def peak(lst: list[int]) -> int:
    """Return the peak of a unimodal (strictly increasing then decreasing) list.

    Binary-search style: inspect the middle three elements and recurse into
    the half that must contain the peak. Assumes len(lst) >= 3 and that the
    peak is an interior element of the unimodal sequence.
    """
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 249 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.