| code (string, len 82-53.2k) | code_codestyle (int64, 0-721) | style_context (string, len 91-41.9k) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_timesformer"] = [
"TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimesformerModel",
"TimesformerForVideoClassification",
"TimesformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 81 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    """Deprecated alias for DeiTImageProcessor, kept for backward compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 81 | 1 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1,
        attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0,
        final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5,
        feat_extract_norm="group", feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05,
        mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0,
        mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320,
        num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100,
        codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1,
        ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
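# The `inputs_to_logits_ratio` property above is just the product of the convolutional
# strides, i.e. how many input samples map to one output frame of the feature encoder.
# A quick sanity check with the default strides (a sketch using the module's own
# functools/operator imports):
_default_strides = (5, 2, 2, 2, 2, 2, 2)
assert functools.reduce(operator.mul, _default_strides, 1) == 320  # 5 * 2**6 = 320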
| 646 |
'''simple docstring'''
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True,
        use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fp16 = True

    # special case for ForPreTraining model: it also needs a next_sentence_label
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
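# The division-based bound in the test above is a relative-tolerance check: with values
# spanning ~10e0 to 10e8, an absolute tolerance is meaningless. The same idea can be
# expressed with torch.allclose and rtol (a standalone sketch, not part of the test suite):
import torch

expected = torch.tensor([2.5e7, 3.9, -0.17])
observed = expected * (1 + 5e-4)  # simulate a small relative deviation
# an absolute tolerance fails across magnitudes (5e-4 relative error on 2.5e7 is ~1.25e4)
assert not torch.allclose(expected, observed, rtol=0.0, atol=1e-3)
# a relative tolerance mirrors 1 - TOLERANCE < expected / observed < 1 + TOLERANCE
assert torch.allclose(expected, observed, rtol=1e-3, atol=0.0)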
| 646 | 1 |
def molar_concentration(moles: float, volume: float, nfactor: float) -> int:
    """Concentration of a solution: (moles / volume) scaled by the equivalence factor."""
    return round(float(moles / volume) * nfactor)


def pressure_of_gas_system(moles: float, temperature: float, volume: float) -> int:
    """Ideal gas law solved for pressure: P = nRT / V, with R = 0.0821 L*atm/(mol*K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def volume_of_gas_system(moles: float, temperature: float, pressure: float) -> int:
    """Ideal gas law solved for volume: V = nRT / P."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def temperature_of_gas_system(moles: float, volume: float, pressure: float) -> int:
    """Ideal gas law solved for temperature: T = PV / (nR)."""
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
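# Quick usage check of the helpers above (the function names are descriptive
# reconstructions; the originals were lost in the dump). One mole at 273 K in 22.4 L
# is roughly 1 atm: P = nRT / V = (1 * 0.0821 * 273) / 22.4 ~= 1.0006, rounding to 1.
assert pressure_of_gas_system(1, 273, 22.4) == 1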
| 63 |
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Swap array[index1] and array[index2] if they are out of order for the given direction."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge a bitonic sequence in array[low : low + length] into sorted order."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort array[low : low + length] ascending (direction=1) or descending (direction=0)."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
user_input = input('Enter numbers separated by a comma:\n').strip()
unsorted = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
| 187 | 0 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
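# For context, the `retrieval_workers` passed above are Ray actor handles. A minimal
# sketch of how such actors could be created (hypothetical setup, not from this file):
#
#   import ray
#
#   ray.init()
#   # `ray.remote` turns RayRetriever into an actor class; each `.remote()` call
#   # constructs one actor living in its own worker process.
#   RemoteRetriever = ray.remote(RayRetriever)
#   workers = [RemoteRetriever.remote() for _ in range(2)]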
| 111 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUs (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
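# Conceptually, LocalSGD lets every worker take K purely local optimizer steps and then
# averages the model parameters across workers, instead of all-reducing gradients on
# every step. A bare-bones sketch of the synchronization step (illustrative only; the
# real implementation lives in accelerate.local_sgd.LocalSGD):
#
#   import torch.distributed as dist
#
#   def average_parameters(model):
#       world_size = dist.get_world_size()
#       for param in model.parameters():
#           dist.all_reduce(param.data, op=dist.ReduceOp.SUM)
#           param.data /= world_size
#
#   # inside the training loop:
#   # if step % local_sgd_steps == 0:
#   #     average_parameters(model)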
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
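# Padding sequence lengths to a multiple of 8 (fp16/bf16) or 16 (fp8) matters because
# tensor cores only engage on matching shapes. The rounding itself is simple (a sketch):
def _round_up(length: int, multiple: int) -> int:
    return ((length + multiple - 1) // multiple) * multiple

assert _round_up(71, 8) == 72
assert _round_up(71, 16) == 80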
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 111 | 1 |
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
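# The Atten helper above is a temperature-scaled softmax over cosine similarities.
# A minimal standalone check of that pattern (a sketch, independent of FSNERModel):
import torch

cos = torch.nn.CosineSimilarity(dim=-1)
softmax = torch.nn.Softmax(dim=-1)
q = torch.randn(4, 16)
s = torch.randn(4, 16)
scores = softmax(1.0 * cos(q, s).unsqueeze(0))  # temperature T = 1.0
assert torch.isclose(scores.sum(), torch.tensor(1.0))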
| 394 |
'''simple docstring'''
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array, output_array):
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self):
        """Propagate the inputs through both hidden layers and return the output activation."""
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self):
        """Update the weight matrices with the gradient of the squared prediction error."""
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )
    def train(self, output, iterations, give_loss):
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")
    def predict(self, input_arr):
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))
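# Note that sigmoid_derivative expects the sigmoid *output*, not its input: for
# s = sigmoid(x), ds/dx = s * (1 - s). A quick finite-difference check (a sketch):
x = 0.5
s = 1 / (1 + numpy.exp(-x))  # sigmoid(x)
analytic = s * (1 - s)  # sigmoid_derivative(s)
h = 1e-6
numeric = (1 / (1 + numpy.exp(-(x + h))) - s) / h
assert abs(analytic - numeric) < 1e-4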
def example() -> int:
    # Input values (each row is one training sample with three binary features).
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
| 394 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_albert''': ['''ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''AlbertConfig''', '''AlbertOnnxConfig'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_albert"] = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_albert"] = [
'''ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AlbertForMaskedLM''',
'''AlbertForMultipleChoice''',
'''AlbertForPreTraining''',
'''AlbertForQuestionAnswering''',
'''AlbertForSequenceClassification''',
'''AlbertForTokenClassification''',
'''AlbertModel''',
'''AlbertPreTrainedModel''',
'''load_tf_weights_in_albert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_albert"] = [
'''TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAlbertForMaskedLM''',
'''TFAlbertForMultipleChoice''',
'''TFAlbertForPreTraining''',
'''TFAlbertForQuestionAnswering''',
'''TFAlbertForSequenceClassification''',
'''TFAlbertForTokenClassification''',
'''TFAlbertMainLayer''',
'''TFAlbertModel''',
'''TFAlbertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_albert"] = [
'''FlaxAlbertForMaskedLM''',
'''FlaxAlbertForMultipleChoice''',
'''FlaxAlbertForPreTraining''',
'''FlaxAlbertForQuestionAnswering''',
'''FlaxAlbertForSequenceClassification''',
'''FlaxAlbertForTokenClassification''',
'''FlaxAlbertModel''',
'''FlaxAlbertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 709 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
        hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37,
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20,
        eos_token_id=2, pad_token_id=1, bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
| 579 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_squeezebert": [
"SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SqueezeBertConfig",
"SqueezeBertOnnxConfig",
],
"tokenization_squeezebert": ["SqueezeBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_squeezebert"] = [
"SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"SqueezeBertForMaskedLM",
"SqueezeBertForMultipleChoice",
"SqueezeBertForQuestionAnswering",
"SqueezeBertForSequenceClassification",
"SqueezeBertForTokenClassification",
"SqueezeBertModel",
"SqueezeBertModule",
"SqueezeBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
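# All three __init__.py rows in this dump follow the same lazy-import pattern: build an
# `_import_structure` mapping, import eagerly only under TYPE_CHECKING, and otherwise
# replace the module with a lazy proxy. A simplified standalone sketch of the idea
# (hypothetical; transformers' actual _LazyModule does more bookkeeping):
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported name back to the submodule that defines it
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f"{self.__name__}.{self._name_to_module[attr]}")
        return getattr(module, attr)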
| 290 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0,
        eos_token_id=2, position_embedding_type="absolute", quant_mode=False,
        force_dequant="none", **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 290 | 1 |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the baseline to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")


class Tee:
    """A helper class to tee print's output into a file, so the report is both shown and saved."""

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
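# Illustrative usage (hypothetical filename): echo prints to the console while appending them to a file:
#
#   sys.stdout = Tee("benchmark-report.txt")
#   print("this line goes to both the console and benchmark-report.txt")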
def get_original_command(max_width=80, full_python_path=False):
    """
    Return the original command line string that can be replayed nicely, wrapped for the given width.

    Args:
        max_width (`int`, *optional*, defaults to 80): the width to wrap for.
        full_python_path (`bool`, *optional*, defaults to `False`):
            whether to replicate the full path or just the last segment (i.e. `python`).
    """
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
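# For example, a hypothetical invocation `CUDA_VISIBLE_DEVICES=0 python trainer-benchmark.py --base-cmd ...`
# comes back as a single shell-replayable string, broken with `\` continuations at roughly 80 characters.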
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
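# Example (hypothetical values): with args.base_cmd = "run.py --output_dir foo" and output_dir = "bench",
# this returns [sys.executable, "run.py", "--output_dir", "bench", "--overwrite_output_dir"].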
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # flip to `if 1:` to debug everything but the actual runs: fake metrics are returned instantly
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id, cmd, variation_key, variation, longest_variation_len, target_metric_key, report_metric_keys, repeat_times, output_dir, verbose
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
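# Example console line for one variation after 3 repeats (hypothetical numbers):
#   2: --tf32 0 --fp16 1: ✓✓✓ 423.49 (421.1, 424.9, 424.4)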
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
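# e.g. with sentinel_value = 285.11 (the baseline row) and a row value of 342.09,
# diff_% = round(100 * (342.09 - 285.11) / 285.11) = 20, matching the sample table above.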
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations", default=None, type=str, nargs="+", required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation", default=None, type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key", default=None, type=str, required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys", default="", type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times", default=1, type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir", default="output_benchmark", type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose", default=False, action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    # e.g. dims = [["--tf32 0", "--tf32 1"], ["", "--fp16"]] yields
    # ["--tf32 0", "--tf32 0 --fp16", "--tf32 1", "--tf32 1 --fp16"]
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
| 214 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
        # Every way to access os.path.join must be patched, and the rest must stay untouched

        # check os.path.join
        assert isinstance(_test_patching.os, _PatchedModuleObj)
        assert isinstance(_test_patching.os.path, _PatchedModuleObj)
        assert _test_patching.os.path.join is mock

        # check path.join
        assert isinstance(_test_patching.path, _PatchedModuleObj)
        assert _test_patching.path.join is mock

        # check join
        assert _test_patching.join is mock

        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname

        # Even renamed modules or objects must be patched

        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os, _PatchedModuleObj)
        assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj)
        assert _test_patching.renamed_os.path.join is mock

        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path, _PatchedModuleObj)
        assert _test_patching.renamed_path.join is mock

        # check renamed_join
        assert _test_patching.renamed_join is mock

        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everything is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join
    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock

    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open


def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass


def test_patch_submodule_missing_builtin():
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)

    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open


def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename


def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
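# Minimal sketch of what these tests exercise (assuming a module `mod` that did `from os.path import join`):
#
#   with patch_submodule(mod, "os.path.join", my_mock):
#       assert mod.join is my_mock          # the direct import is patched
#       assert mod.os.path.join is my_mock  # and so is the attribute chain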
| 214 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")

        # the task template is frozen, so copy it and update the copy's __dict__ directly
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
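# Illustrative use (hypothetical dataset features): align the template with a dataset's own label names
#
#   features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
#   task = ImageClassification().align_with_features(features)
#   task.label_schema["labels"]  # ClassLabel(names=["cat", "dog"])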
| 146 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 74 | 0 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
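# Example (hypothetical key): "visual_encoder.blocks.0.attn.proj.weight" passes through the substitutions
# above and comes out as "vision_model.encoder.layers.0.self_attn.projection.weight".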
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"

    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
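# Example invocation (hypothetical paths; the script's file name depends on where it lives):
#   python convert_blip_checkpoints.py --pytorch_dump_folder_path ./blip-base --config_path ./config.json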
if __name__ == "__main__":
a_ : List[str] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
a_ : str = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 678 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Calculate the Euclidean distance between two vectors, using numpy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Calculate the Euclidean distance between two vectors, without numpy."""
    return sum((va - vb) ** 2 for va, vb in zip(vector_1, vector_2)) ** (1 / 2)
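# Quick sanity check: both implementations agree, e.g.
#   euclidean_distance((0, 0), (3, 4)) == euclidean_distance_no_np((0, 0), (3, 4)) == 5.0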
if __name__ == "__main__":
    def benchmark() -> None:
        from timeit import timeit

        print("Without Numpy")
        print(timeit("euclidean_distance_no_np([1, 2, 3], [4, 5, 6])", number=10_000, globals=globals()))
        print("With Numpy")
        print(timeit("euclidean_distance([1, 2, 3], [4, 5, 6])", number=10_000, globals=globals()))

    benchmark() | 678 | 1 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)
class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath
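    # Example: construct_xpath(["html", "body", "div"], [0, 0, 1]) -> "/html/body/div[1]"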
    def __call__(self, html_strings) -> BatchFeature:
        # Check that strings has a valid type
        valid_strings = False
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)

            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
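# Illustrative usage (hypothetical HTML input):
#   fe = MarkupLMFeatureExtractor()
#   enc = fe("<html><body><p>Hello</p></body></html>")
#   enc["nodes"]   # [["Hello"]]
#   enc["xpaths"]  # [["/html/body/p"]]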
| 643 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
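# Illustrative (values shown are just the defaults): a config with a timm ResNet-50 backbone and 300 queries:
#   config = ConditionalDetrConfig(use_timm_backbone=True, backbone="resnet50", num_queries=300)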
| 643 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ReformerAttention',
'ReformerForMaskedLM',
'ReformerForQuestionAnswering',
'ReformerForSequenceClassification',
'ReformerLayer',
'ReformerModel',
'ReformerModelWithLMHead',
'ReformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 706 | """simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b"

        input_ids = tokenizer.encode(text)
        truncation_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncation_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
| 663 | 0 |
"""simple docstring"""
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer
class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids} | 174 |
"""simple docstring"""
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : str = " " ) ->list:
lowerCamelCase__ : str =[]
lowerCamelCase__ : int =0
for index, char in enumerate(snake_case_ ):
if char == separator:
split_words.append(string[last_index:index] )
lowerCamelCase__ : Dict =index + 1
elif index + 1 == len(snake_case_ ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod() | 174 | 1 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
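# Sketch of the decoding flow (hypothetical tensors): batch_decode expects a 3-tuple of per-head logits
#   (char_preds, bpe_preds, wp_preds); each head is decoded separately and the highest-confidence
#   string is kept per sample, e.g. out = processor.batch_decode((char_logits, bpe_logits, wp_logits)).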
| 716 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
__A : Any = get_logger()
__A : Optional[dict] = None
class lowercase ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
'''simple docstring'''
def __init__( self : str , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Dict=None , **__lowerCamelCase : str ) -> List[Any]:
'''simple docstring'''
super().__init__(features=__lowerCamelCase )
import jax
from jaxlib.xla_client import Device
if isinstance(__lowerCamelCase , __lowerCamelCase ):
raise ValueError(
f'''Expected {device} to be a `str` not {type(__lowerCamelCase )}, as `jaxlib.xla_extension.Device` '''
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
lowerCamelCase__ = device if isinstance(__lowerCamelCase , __lowerCamelCase ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
lowerCamelCase__ = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'''Device with string identifier {self.device} not listed among the available '''
f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
f'''device: {str(jax.devices()[0] )}.''' )
lowerCamelCase__ = str(jax.devices()[0] )
lowerCamelCase__ = jnp_array_kwargs
@staticmethod
def a__ ( ) -> Dict[str, "jaxlib.xla_extension.Device"]:
'''simple docstring'''
import jax
return {str(__lowerCamelCase ): device for device in jax.devices()}
    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column
    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)
    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
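# --- Added usage sketch (hedged; not part of the original module) ---
# A formatter like this is normally selected through `Dataset.with_format`;
# assuming a registered "jax" format backed by this class:
#
#     from datasets import Dataset
#
#     ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("jax")
#     print(type(ds[0]["x"]))  # jax.Array, materialized on the formatter's device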
| 187 | 0 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_002)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
        # fmt: off
__UpperCamelCase = {"input_ids": [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on

        # `__UpperCamelCase` above is the expected-encoding dict built in the fmt: off block
        self.tokenizer_integration_test_util(
            expected_encoding=__UpperCamelCase,
            model_name="xlm-roberta-base",
            revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3",
        )
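# --- Added illustration (hedged; not part of the original test file) ---
# The `tokenizer.fairseq_offset` additions above exist because fairseq reserves
# ids 0-3 for "<s>", "<pad>", "</s>" and "<unk>", shifting every SentencePiece id.
# A minimal sketch of that mapping (offset value of 1 assumed, as in XLM-R):
#
#     FAIRSEQ_OFFSET = 1
#
#     def sp_id_to_fairseq_id(sp_id: int) -> int:
#         return sp_id + FAIRSEQ_OFFSET  # shift past the reserved special tokens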
"""simple docstring"""
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
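# --- Added usage sketch (hedged) ---
# With these entry points exposed from a repository's hubconf.py, models can be
# loaded through `torch.hub`; the repo path below is an assumption:
#
#     import torch
#
#     model = torch.hub.load("huggingface/transformers", "modelForMaskedLM", "bert-base-uncased")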
| 510 | 0 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"feature request",
"wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state="closed" )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main()
| 704 |
'''simple docstring'''
def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by magnitude of first term --> creates 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set
def solve_simultaneous(equations: list[list]) -> list:
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
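# --- Added cross-check (hedged) ---
# The same 5x5 system can be verified with numpy: summing all five equations
# gives 6*(a+b+c+d+e) = 30, so each variable equals its right-hand side minus 5.
#
#     import numpy as np
#
#     a = np.array([row[:-1] for row in eq], dtype=float)
#     b = np.array([row[-1] for row in eq], dtype=float)
#     print(np.linalg.solve(a, b))  # [-1.  0.  1.  2.  3.]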
| 691 | 0 |
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
AUDIO_EXTENSIONS = [
""".aiff""",
""".au""",
""".avr""",
""".caf""",
""".flac""",
""".htk""",
""".svx""",
""".mat4""",
""".mat5""",
""".mpc2k""",
""".ogg""",
""".paf""",
""".pvf""",
""".raw""",
""".rf64""",
""".sd2""",
""".sds""",
""".ircam""",
""".voc""",
""".w64""",
""".wav""",
""".nist""",
""".wavex""",
""".wve""",
""".xi""",
""".mp3""",
""".opus""",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
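# --- Added usage sketch (hedged) ---
# With this builder registered, a local folder laid out as
# `data_dir/<label>/<file>.wav` loads directly; the path below is a placeholder:
#
#     from datasets import load_dataset
#
#     ds = load_dataset("audiofolder", data_dir="/path/to/folder")
#     print(ds["train"][0]["audio"], ds["train"][0]["label"])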
| 662 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path, pytorch_dump_folder_path, classification_head):
    """Copy/paste/tweak the fairseq X-MOD checkpoint's weights into the transformers design."""
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)
    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias
        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias
        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias
        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias
        # end of layer
    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
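# --- Added usage sketch (hedged; the script filename is an assumption) ---
#
#     python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#         --xmod_checkpoint_path /path/to/model.pt \
#         --pytorch_dump_folder_path /path/to/output \
#         --classification_head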
| 662 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
device = torch.device("cpu")


def prepare_img():
    """Load the standard COCO test image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    """Return the expected first five logits for each supported checkpoint."""
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.17_03E00, 2.11_07E00, -2.08_11E00, 8.86_85E-01, 2.43_60E-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.96_36E-01, 2.34_78E-01, -1.69_63E00, -1.73_81E00, -8.63_37E-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.27_68E-01, -4.74_29E-01, -1.08_97E00, -1.02_48E00, 3.55_23E-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.53_30E-01, 2.42_11E-01, -6.01_85E-01, -8.27_89E-01, -6.04_46E-02] )
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    """Copy/paste/tweak the original checkpoint's weights into our SwiftFormer structure."""
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]

    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]

    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]

    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]
    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)
    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
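# --- Added usage sketch (hedged; the script filename is an assumption) ---
#
#     python convert_swiftformer_original_to_hf.py \
#         --swiftformer_name swiftformer_xs \
#         --pytorch_dump_folder_path ./converted_outputs/ \
#         --original_ckpt /path/to/swiftformer_xs.pth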
| 713 |
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
| 29 | 0 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"files" , [
["full:README.md", "dataset_infos.json"],
["empty:README.md", "dataset_infos.json"],
["dataset_infos.json"],
["full:README.md"],
] , )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"dataset_info" , [
DatasetInfo(),
DatasetInfo(
description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=4_2 , ),
] , )
def test_dataset_info_dump_and_reload(tmp_path, dataset_info):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"dataset_infos_dict" , [
DatasetInfosDict(),
DatasetInfosDict({"default": DatasetInfo()} ),
DatasetInfosDict({"my_config_name": DatasetInfo()} ),
DatasetInfosDict(
{
"default": DatasetInfo(
description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=4_2 , )
} ),
DatasetInfosDict(
{
"v1": DatasetInfo(dataset_size=4_2 ),
"v2": DatasetInfo(dataset_size=1_3_3_7 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
| 81 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_bridgetower': [
'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BridgeTowerConfig',
'BridgeTowerTextConfig',
'BridgeTowerVisionConfig',
],
'processing_bridgetower': ['BridgeTowerProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST',
'BridgeTowerForContrastiveLearning',
'BridgeTowerForImageAndTextRetrieval',
'BridgeTowerForMaskedLM',
'BridgeTowerModel',
'BridgeTowerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 65 | 0 |
"""simple docstring"""
import functools
def minimum_tickets_cost(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
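# --- Added examples (hedged): classic inputs for this problem ---
#
#     print(minimum_tickets_cost([1, 4, 6, 7, 8, 20], [2, 7, 15]))                      # 11
#     print(minimum_tickets_cost([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [2, 7, 15]))  # 17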
| 721 |
"""simple docstring"""
from __future__ import annotations
sieve = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if sieve[i]:
        for j in range(i * i, 1_000_001, i):
            sieve[j] = False
    i += 1
def is_prime(n: int) -> bool:
    return sieve[n]


def contains_an_even_digit(n: int) -> bool:
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1_000_000) -> list[int]:
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def solution() -> int:
    return len(find_circular_primes())
if __name__ == "__main__":
print(F'{len(find_circular_primes()) = }')
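# --- Added check (hedged) ---
# The circular primes below 100 are 2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79, 97:
#
#     print(find_circular_primes(100))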
| 348 | 0 |
'''simple docstring'''
def max_product_subarray(numbers: list[int]) -> int:
    """
    Return the maximum product obtainable from a contiguous subarray of `numbers`.
    """
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
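# --- Added examples (hedged): classic cases for this routine ---
#
#     print(max_product_subarray([2, 3, -2, 4]))  # 6, from the subarray [2, 3]
#     print(max_product_subarray([-2, 0, -1]))    # 0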
| 399 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
REMOTE_MODEL_PATHS = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
def _download(model_repo, model_filename):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=model_repo, filename=model_filename, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()

    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")

    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()

    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)

    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params / 1e6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict

    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu

    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)

    if model_type == "text":
        bark_model = bark_model["model"]

    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10

    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)

    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")

    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )

    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )

    bark = BarkModel(bark_config)

    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec

    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
    args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
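# --- Added usage sketch (hedged; the script filename is an assumption) ---
#
#     python convert_suno_to_hf.py text /path/to/output --is_small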
| 399 | 1 |
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
    "google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
    "google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
class OwlViTTextConfig(PretrainedConfig):
    model_type = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class a ( lowerCAmelCase_ ):
_snake_case : Union[str, Any] = 'owlvit_vision_model'
def __init__( self : List[str] , __lowerCAmelCase : Union[str, Any]=768 , __lowerCAmelCase : Union[str, Any]=3072 , __lowerCAmelCase : Tuple=12 , __lowerCAmelCase : List[str]=12 , __lowerCAmelCase : Any=3 , __lowerCAmelCase : str=768 , __lowerCAmelCase : int=32 , __lowerCAmelCase : Optional[Any]="quick_gelu" , __lowerCAmelCase : Tuple=1e-5 , __lowerCAmelCase : Optional[Any]=0.0 , __lowerCAmelCase : List[Any]=0.02 , __lowerCAmelCase : List[Any]=1.0 , **__lowerCAmelCase : List[str] , ):
super().__init__(**__lowerCAmelCase )
_UpperCAmelCase = hidden_size
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = initializer_range
_UpperCAmelCase = initializer_factor
@classmethod
def lowerCAmelCase_ ( cls : Dict , __lowerCAmelCase : Union[str, os.PathLike] , **__lowerCAmelCase : int ):
cls._set_token_in_kwargs(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = cls.get_config_dict(__lowerCAmelCase , **__lowerCAmelCase )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get("""model_type""" ) == "owlvit":
_UpperCAmelCase = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__lowerCAmelCase , **__lowerCAmelCase )
class a ( lowerCAmelCase_ ):
_snake_case : Optional[Any] = 'owlvit'
_snake_case : List[Any] = True
def __init__( self : Optional[int] , __lowerCAmelCase : int=None , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : int=512 , __lowerCAmelCase : Union[str, Any]=2.6_592 , __lowerCAmelCase : Dict=True , **__lowerCAmelCase : Dict , ):
super().__init__(**__lowerCAmelCase )
if text_config is None:
_UpperCAmelCase = {}
logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" )
if vision_config is None:
_UpperCAmelCase = {}
            logger.info("""vision_config is None. Initializing the OwlViTVisionConfig with default values.""" )
_UpperCAmelCase = OwlViTTextConfig(**__lowerCAmelCase )
_UpperCAmelCase = OwlViTVisionConfig(**__lowerCAmelCase )
_UpperCAmelCase = projection_dim
_UpperCAmelCase = logit_scale_init_value
_UpperCAmelCase = return_dict
_UpperCAmelCase = 1.0
@classmethod
def lowerCAmelCase_ ( cls : Dict , __lowerCAmelCase : Union[str, os.PathLike] , **__lowerCAmelCase : Optional[Any] ):
cls._set_token_in_kwargs(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = cls.get_config_dict(__lowerCAmelCase , **__lowerCAmelCase )
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__lowerCAmelCase , **__lowerCAmelCase )
@classmethod
def lowerCAmelCase_ ( cls : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , **__lowerCAmelCase : Tuple ):
_UpperCAmelCase = {}
_UpperCAmelCase = text_config
_UpperCAmelCase = vision_config
return cls.from_dict(__lowerCAmelCase , **__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.text_config.to_dict()
_UpperCAmelCase = self.vision_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
class a ( lowerCAmelCase_ ):
@property
def lowerCAmelCase_ ( self : List[str] ):
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
] )
@property
def lowerCAmelCase_ ( self : str ):
return OrderedDict(
[
("""logits_per_image""", {0: """batch"""}),
("""logits_per_text""", {0: """batch"""}),
("""text_embeds""", {0: """batch"""}),
("""image_embeds""", {0: """batch"""}),
] )
@property
def lowerCAmelCase_ ( self : List[str] ):
return 1e-4
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : "ProcessorMixin" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : Optional["TensorType"] = None , ):
_UpperCAmelCase = super().generate_dummy_inputs(
processor.tokenizer , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , framework=__lowerCAmelCase )
_UpperCAmelCase = super().generate_dummy_inputs(
processor.image_processor , batch_size=__lowerCAmelCase , framework=__lowerCAmelCase )
return {**text_input_dict, **image_input_dict}
@property
def lowerCAmelCase_ ( self : str ):
return 14
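# Minimal sketch of composing the three config classes above. The original
# names OwlViTTextConfig / OwlViTVisionConfig / OwlViTConfig are assumed;
# the obfuscated file names every class `a`.
text_config = OwlViTTextConfig()
vision_config = OwlViTVisionConfig()
composite_config = OwlViTConfig(text_config=text_config.to_dict(), vision_config=vision_config.to_dict())
assert composite_config.projection_dim == 512  # the default shown in the signature above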
| 275 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class a ( lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case : Union[str, Any] = 'nat'
_snake_case : List[str] = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : List[Any] , __lowerCAmelCase : int=4 , __lowerCAmelCase : Union[str, Any]=3 , __lowerCAmelCase : Dict=64 , __lowerCAmelCase : int=[3, 4, 6, 5] , __lowerCAmelCase : List[str]=[2, 4, 8, 16] , __lowerCAmelCase : Tuple=7 , __lowerCAmelCase : List[str]=3.0 , __lowerCAmelCase : int=True , __lowerCAmelCase : int=0.0 , __lowerCAmelCase : Tuple=0.0 , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : List[str]="gelu" , __lowerCAmelCase : List[Any]=0.02 , __lowerCAmelCase : Union[str, Any]=1e-5 , __lowerCAmelCase : List[Any]=0.0 , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : Optional[int]=None , **__lowerCAmelCase : Optional[Any] , ):
super().__init__(**__lowerCAmelCase )
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = depths
_UpperCAmelCase = len(__lowerCAmelCase )
_UpperCAmelCase = num_heads
_UpperCAmelCase = kernel_size
_UpperCAmelCase = mlp_ratio
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = hidden_act
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_UpperCAmelCase = int(embed_dim * 2 ** (len(__lowerCAmelCase ) - 1) )
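        # e.g. with the defaults embed_dim=64 and depths=[3, 4, 6, 5] (four stages): 64 * 2**3 = 512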
_UpperCAmelCase = layer_scale_init_value
_UpperCAmelCase = ["""stem"""] + [f'''stage{idx}''' for idx in range(1 , len(__lowerCAmelCase ) + 1 )]
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(
out_features=__lowerCAmelCase , out_indices=__lowerCAmelCase , stage_names=self.stage_names )
| 275 | 1 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase__ : Any = logging.get_logger(__name__)
lowercase__ : List[str] = {'''vocab_file''': '''vocab.json'''}
lowercase__ : Union[str, Any] = {
'''vocab_file''': {
'''mgp-str''': '''https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json''',
}
}
lowercase__ : Optional[int] = {'''mgp-str''': 27}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , _UpperCAmelCase , _UpperCAmelCase="[GO]" , _UpperCAmelCase="[GO]" , _UpperCAmelCase="[s]" , _UpperCAmelCase="[GO]" , **_UpperCAmelCase):
'''simple docstring'''
super().__init__(
unk_token=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , **_UpperCAmelCase , )
with open(_UpperCAmelCase , encoding='utf-8') as vocab_handle:
__A : Union[str, Any] = json.load(_UpperCAmelCase)
__A : str = {v: k for k, v in self.vocab.items()}
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return len(self.vocab)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
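        # Per-character tokenization: "abc" -> ["a", "b", "c"].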
__A : Dict = []
for s in text:
char_tokens.extend(_UpperCAmelCase)
return char_tokens
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
return self.vocab.get(_UpperCAmelCase , self.vocab.get(self.unk_token))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
return self.decoder.get(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
if not os.path.isdir(_UpperCAmelCase):
logger.error('Vocabulary path ({}) should be a directory'.format(_UpperCAmelCase))
return
__A : int = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
with open(_UpperCAmelCase , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=_UpperCAmelCase , ensure_ascii=_UpperCAmelCase) + '\n')
        return (vocab_file,)
| 8 |
def bubble_sort(list_data: list, length: int = 0) -> list:
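    """Recursive bubble sort: each pass bubbles the largest remaining element
    to the end, then recurses on the one-shorter prefix.

    >>> bubble_sort([5, 1, 4, 2, 8])
    [1, 2, 4, 5, 8]
    >>> bubble_sort([])
    []
    """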
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 352 | 0 |
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean


def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
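# Round-trip usage sketch for the two functions above; the key and message
# below are illustrative.
if __name__ == "__main__":
    key = "playfair example"
    message = "hide the gold in the tree stump"
    ciphertext = encode(message, key)
    print(ciphertext)
    print(decode(ciphertext, key))  # uppercased plaintext with X padding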
| 221 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__: Dict = logging.get_logger(__name__)
A__: Tuple = {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class _a ( UpperCamelCase__):
"""simple docstring"""
UpperCamelCase__ = """gpt_neox"""
def __init__( self: List[str] , __lowerCamelCase: List[Any]=5_0432 , __lowerCamelCase: List[Any]=6144 , __lowerCamelCase: Tuple=44 , __lowerCamelCase: Optional[Any]=64 , __lowerCamelCase: Optional[Any]=2_4576 , __lowerCamelCase: Optional[int]="gelu" , __lowerCamelCase: Tuple=0.25 , __lowerCamelCase: str=1_0000 , __lowerCamelCase: Any=0.0 , __lowerCamelCase: List[Any]=0.0 , __lowerCamelCase: Dict=0.1 , __lowerCamelCase: List[str]=2048 , __lowerCamelCase: Dict=0.02 , __lowerCamelCase: Any=1e-5 , __lowerCamelCase: Any=True , __lowerCamelCase: Optional[int]=0 , __lowerCamelCase: Dict=2 , __lowerCamelCase: List[str]=False , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: List[Any]=None , **__lowerCamelCase: Any , ):
'''simple docstring'''
super().__init__(bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
UpperCamelCase__: List[str] = vocab_size
UpperCamelCase__: List[str] = max_position_embeddings
UpperCamelCase__: Optional[Any] = hidden_size
UpperCamelCase__: Any = num_hidden_layers
UpperCamelCase__: Optional[int] = num_attention_heads
UpperCamelCase__: Union[str, Any] = intermediate_size
UpperCamelCase__: List[str] = hidden_act
UpperCamelCase__: str = rotary_pct
UpperCamelCase__: Any = rotary_emb_base
UpperCamelCase__: int = attention_dropout
UpperCamelCase__: Optional[Any] = hidden_dropout
UpperCamelCase__: Any = classifier_dropout
UpperCamelCase__: List[str] = initializer_range
UpperCamelCase__: Optional[Any] = layer_norm_eps
UpperCamelCase__: int = use_cache
UpperCamelCase__: Optional[Any] = tie_word_embeddings
UpperCamelCase__: List[str] = use_parallel_residual
UpperCamelCase__: str = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"The hidden size is not divisble by the number of attention heads! Make sure to update them!" )
def UpperCAmelCase_ ( self: Tuple ):
'''simple docstring'''
if self.rope_scaling is None:
return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                F"got {self.rope_scaling}" )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(__lowerCamelCase , __lowerCamelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(F"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
| 221 | 1 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
a : Tuple = logging.getLogger(__name__)
def lowerCamelCase__ ( __lowerCamelCase : str=2 , __lowerCamelCase : Union[str, Any]=3 , __lowerCamelCase : str=16 , __lowerCamelCase : int = 10 , __lowerCamelCase : int = 2 ):
def get_dataset(__lowerCamelCase : List[Any] ):
__UpperCAmelCase : Tuple = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(__lowerCamelCase , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
__UpperCAmelCase : int = get_dataset(__lowerCamelCase )
__UpperCAmelCase : Tuple = get_dataset(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = DataLoader(__lowerCamelCase , shuffle=__lowerCamelCase , batch_size=__lowerCamelCase , num_workers=4 )
__UpperCAmelCase : int = DataLoader(__lowerCamelCase , shuffle=__lowerCamelCase , batch_size=__lowerCamelCase , num_workers=4 )
return (train_dataloader, valid_dataloader)
def lowerCamelCase__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : Tuple=None ):
__UpperCAmelCase : Dict = []
for epoch in range(__lowerCamelCase ):
# Train quickly
model.train()
for batch in dataloader:
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = batch
__UpperCAmelCase : str = model(__lowerCamelCase )
__UpperCAmelCase : Dict = torch.nn.functional.mse_loss(__lowerCamelCase , __lowerCamelCase )
accelerator.backward(__lowerCamelCase )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class a ( nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] ) -> Any:
super().__init__()
__UpperCAmelCase : Tuple = nn.Parameter(torch.randn(1 ) )
__UpperCAmelCase : str = nn.Parameter(torch.randn(1 ) )
def UpperCAmelCase ( self : Union[str, Any] , __lowercase : List[str] ) -> Union[str, Any]:
return x * self.a + self.b
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self : List[str] ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCAmelCase : List[Any] = DummyModel()
__UpperCAmelCase : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__UpperCAmelCase , __UpperCAmelCase : str = dummy_dataloaders()
__UpperCAmelCase : int = ProjectConfiguration(total_limit=1 , project_dir=__lowercase , automatic_checkpoint_naming=__lowercase )
# Train baseline
__UpperCAmelCase : Optional[int] = Accelerator(project_config=__lowercase )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = accelerator.prepare(
__lowercase , __lowercase , __lowercase , __lowercase )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCAmelCase : List[str] = DummyModel()
__UpperCAmelCase : int = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__UpperCAmelCase , __UpperCAmelCase : Dict = dummy_dataloaders()
# Train baseline
__UpperCAmelCase : int = Accelerator()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = accelerator.prepare(
__lowercase , __lowercase , __lowercase , __lowercase )
# Save initial
__UpperCAmelCase : Any = os.path.join(__lowercase , """initial""" )
accelerator.save_state(__lowercase )
((__UpperCAmelCase) , (__UpperCAmelCase)) : List[Any] = model.a.item(), model.b.item()
__UpperCAmelCase : int = optimizer.state_dict()
__UpperCAmelCase : str = train(3 , __lowercase , __lowercase , __lowercase , __lowercase )
((__UpperCAmelCase) , (__UpperCAmelCase)) : int = model.a.item(), model.b.item()
__UpperCAmelCase : Tuple = optimizer.state_dict()
# Train partially
set_seed(42 )
__UpperCAmelCase : int = DummyModel()
__UpperCAmelCase : Tuple = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = dummy_dataloaders()
__UpperCAmelCase : Optional[Any] = Accelerator()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = accelerator.prepare(
__lowercase , __lowercase , __lowercase , __lowercase )
accelerator.load_state(__lowercase )
((__UpperCAmelCase) , (__UpperCAmelCase)) : List[Any] = model.a.item(), model.b.item()
__UpperCAmelCase : Tuple = optimizer.state_dict()
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
__UpperCAmelCase : Union[str, Any] = train(2 , __lowercase , __lowercase , __lowercase , __lowercase )
# Save everything
__UpperCAmelCase : List[Any] = os.path.join(__lowercase , """checkpoint""" )
accelerator.save_state(__lowercase )
# Load everything back in and make sure all states work
accelerator.load_state(__lowercase )
test_rands += train(1 , __lowercase , __lowercase , __lowercase , __lowercase )
((__UpperCAmelCase) , (__UpperCAmelCase)) : Optional[int] = model.a.item(), model.b.item()
__UpperCAmelCase : str = optimizer.state_dict()
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
def UpperCAmelCase ( self : int ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCAmelCase : Any = DummyModel()
__UpperCAmelCase : Dict = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = dummy_dataloaders()
__UpperCAmelCase : str = ProjectConfiguration(automatic_checkpoint_naming=__lowercase )
# Train baseline
__UpperCAmelCase : Dict = Accelerator(project_dir=__lowercase , project_config=__lowercase )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = accelerator.prepare(
__lowercase , __lowercase , __lowercase , __lowercase )
# Save initial
accelerator.save_state()
((__UpperCAmelCase) , (__UpperCAmelCase)) : List[str] = model.a.item(), model.b.item()
__UpperCAmelCase : Dict = optimizer.state_dict()
__UpperCAmelCase : Tuple = train(3 , __lowercase , __lowercase , __lowercase , __lowercase )
((__UpperCAmelCase) , (__UpperCAmelCase)) : int = model.a.item(), model.b.item()
__UpperCAmelCase : str = optimizer.state_dict()
# Train partially
set_seed(42 )
__UpperCAmelCase : List[Any] = DummyModel()
__UpperCAmelCase : List[str] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = dummy_dataloaders()
__UpperCAmelCase : Tuple = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=__lowercase )
__UpperCAmelCase : Any = Accelerator(project_dir=__lowercase , project_config=__lowercase )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = accelerator.prepare(
__lowercase , __lowercase , __lowercase , __lowercase )
accelerator.load_state(os.path.join(__lowercase , """checkpoints""" , """checkpoint_0""" ) )
((__UpperCAmelCase) , (__UpperCAmelCase)) : Tuple = model.a.item(), model.b.item()
__UpperCAmelCase : Optional[Any] = optimizer.state_dict()
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
__UpperCAmelCase : Dict = train(2 , __lowercase , __lowercase , __lowercase , __lowercase )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(__lowercase , """checkpoints""" , """checkpoint_1""" ) )
test_rands += train(1 , __lowercase , __lowercase , __lowercase , __lowercase )
((__UpperCAmelCase) , (__UpperCAmelCase)) : List[str] = model.a.item(), model.b.item()
__UpperCAmelCase : Optional[int] = optimizer.state_dict()
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertEqual(__lowercase , __lowercase )
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
__UpperCAmelCase : Tuple = torch.tensor([1, 2, 3] )
__UpperCAmelCase : Dict = torch.tensor([2, 3, 4] )
__UpperCAmelCase : Union[str, Any] = DummyModel()
__UpperCAmelCase : str = torch.optim.Adam(net.parameters() )
__UpperCAmelCase : str = Accelerator()
with self.assertRaises(__lowercase ) as ve:
accelerator.register_for_checkpointing(__lowercase , __lowercase , __lowercase , __lowercase )
__UpperCAmelCase : Optional[int] = str(ve.exception )
self.assertTrue("""Item at index 0""" in message )
self.assertTrue("""Item at index 1""" in message )
self.assertFalse("""Item at index 2""" in message )
self.assertFalse("""Item at index 3""" in message )
def UpperCAmelCase ( self : Dict ) -> Any:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCAmelCase : int = DummyModel()
__UpperCAmelCase : int = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__UpperCAmelCase : Dict = torch.optim.lr_scheduler.StepLR(__lowercase , step_size=1 , gamma=0.99 )
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = dummy_dataloaders()
__UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=__lowercase )
# Train baseline
__UpperCAmelCase : Optional[Any] = Accelerator(project_dir=__lowercase , project_config=__lowercase )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = accelerator.prepare(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
# Save initial
accelerator.save_state()
__UpperCAmelCase : Optional[int] = scheduler.state_dict()
train(3 , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
self.assertNotEqual(__lowercase , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(__lowercase , """checkpoints""" , """checkpoint_0""" ) )
self.assertEqual(__lowercase , scheduler.state_dict() )
def UpperCAmelCase ( self : Any ) -> List[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCAmelCase : Optional[int] = DummyModel()
__UpperCAmelCase : Optional[int] = ProjectConfiguration(automatic_checkpoint_naming=__lowercase , total_limit=2 )
# Train baseline
__UpperCAmelCase : Dict = Accelerator(project_dir=__lowercase , project_config=__lowercase )
__UpperCAmelCase : int = accelerator.prepare(__lowercase )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(__lowercase , """checkpoints""" , """checkpoint_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowercase , """checkpoints""" , """checkpoint_9""" ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowercase , """checkpoints""" , """checkpoint_10""" ) ) )
@require_cuda
def UpperCAmelCase ( self : Dict ) -> Optional[Any]:
__UpperCAmelCase : List[str] = ["""torchrun""", f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(__lowercase , env=os.environ.copy() )
if __name__ == "__main__":
a : Union[str, Any] = "/tmp/accelerate/state_checkpointing"
a : Any = DummyModel()
a : str = torch.optim.Adam(params=model.parameters(), lr=1e-3)
a : List[str] = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
a ,a : List[Any] = dummy_dataloaders()
a : List[str] = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
a : Dict = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
a ,a ,a ,a ,a : Dict = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
a ,a : str = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
a : Union[str, Any] = group["params"][0].device
break
assert param_device.type == accelerator.device.type
a : str = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
a : Union[str, Any] = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
a : Dict = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
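# Standalone sketch of the rotation behavior the retention test above
# exercises: with `total_limit=2` and automatic checkpoint naming, only the
# two newest checkpoints survive eleven saves (the directory is illustrative).
if __name__ == "__main__":
    rotation_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
    rotation_accelerator = Accelerator(project_dir="/tmp/accelerate/rotation_demo", project_config=rotation_config)
    rotation_model = rotation_accelerator.prepare(DummyModel())
    for _ in range(11):
        rotation_accelerator.save_state()  # keeps only checkpoint_9 and checkpoint_10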
| 63 |
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
UpperCAmelCase_ = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """Normalize a single image, one video, or a batch of videos to `List[List[ImageInput]]`."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"""Could not make batched video from {videos}""" )
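# Illustrative shapes, given frames that pass `is_valid_image`:
#   frame              -> [[frame]]           a single image
#   [frame, frame]     -> [[frame, frame]]    one video
#   [[frame, frame]]   -> [[frame, frame]]    an already-batched list of videos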
class UpperCamelCase__ ( lowerCamelCase__ ):
'''simple docstring'''
__a : List[Any] = ["""pixel_values"""]
def __init__( self, snake_case__ = True, snake_case__ = None, snake_case__ = PILImageResampling.BILINEAR, snake_case__ = True, snake_case__ = None, snake_case__ = True, snake_case__ = 1 / 2_55, snake_case__ = True, snake_case__ = True, snake_case__ = None, snake_case__ = None, **snake_case__, ) -> None:
"""simple docstring"""
super().__init__(**snake_case__ )
lowercase_ : str = size if size is not None else {"""shortest_edge""": 2_56}
lowercase_ : int = get_size_dict(snake_case__, default_to_square=snake_case__ )
lowercase_ : int = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
lowercase_ : Optional[int] = get_size_dict(snake_case__, param_name="""crop_size""" )
lowercase_ : List[Any] = do_resize
lowercase_ : int = size
lowercase_ : int = do_center_crop
lowercase_ : Optional[Any] = crop_size
lowercase_ : str = resample
lowercase_ : Any = do_rescale
lowercase_ : Dict = rescale_factor
lowercase_ : List[Any] = offset
lowercase_ : int = do_normalize
lowercase_ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase_ : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case__ ( self, snake_case__, snake_case__, snake_case__ = PILImageResampling.BILINEAR, snake_case__ = None, **snake_case__, ) -> np.ndarray:
"""simple docstring"""
lowercase_ : int = get_size_dict(snake_case__, default_to_square=snake_case__ )
if "shortest_edge" in size:
lowercase_ : Union[str, Any] = get_resize_output_image_size(snake_case__, size["""shortest_edge"""], default_to_square=snake_case__ )
elif "height" in size and "width" in size:
lowercase_ : Dict = (size["""height"""], size["""width"""])
else:
raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(snake_case__, size=snake_case__, resample=snake_case__, data_format=snake_case__, **snake_case__ )
def snake_case__ ( self, snake_case__, snake_case__, snake_case__ = None, **snake_case__, ) -> np.ndarray:
"""simple docstring"""
lowercase_ : List[Any] = get_size_dict(snake_case__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(snake_case__, size=(size["""height"""], size["""width"""]), data_format=snake_case__, **snake_case__ )
def snake_case__ ( self, snake_case__, snake_case__, snake_case__ = True, snake_case__ = None, **snake_case__, ) -> str:
"""simple docstring"""
lowercase_ : Dict = image.astype(np.floataa )
if offset:
lowercase_ : Optional[Any] = image - (scale / 2)
return rescale(snake_case__, scale=snake_case__, data_format=snake_case__, **snake_case__ )
def snake_case__ ( self, snake_case__, snake_case__, snake_case__, snake_case__ = None, **snake_case__, ) -> np.ndarray:
"""simple docstring"""
return normalize(snake_case__, mean=snake_case__, std=snake_case__, data_format=snake_case__, **snake_case__ )
def snake_case__ ( self, snake_case__, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = ChannelDimension.FIRST, ) -> np.ndarray:
"""simple docstring"""
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
if offset and not do_rescale:
raise ValueError("""For offset, do_rescale must also be set to True.""" )
# All transformations expect numpy arrays.
lowercase_ : Optional[int] = to_numpy_array(snake_case__ )
if do_resize:
lowercase_ : Dict = self.resize(image=snake_case__, size=snake_case__, resample=snake_case__ )
if do_center_crop:
lowercase_ : Optional[int] = self.center_crop(snake_case__, size=snake_case__ )
if do_rescale:
lowercase_ : Dict = self.rescale(image=snake_case__, scale=snake_case__, offset=snake_case__ )
if do_normalize:
lowercase_ : Optional[Any] = self.normalize(image=snake_case__, mean=snake_case__, std=snake_case__ )
lowercase_ : Tuple = to_channel_dimension_format(snake_case__, snake_case__ )
return image
def snake_case__ ( self, snake_case__, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = None, snake_case__ = ChannelDimension.FIRST, **snake_case__, ) -> PIL.Image.Image:
"""simple docstring"""
lowercase_ : Any = do_resize if do_resize is not None else self.do_resize
lowercase_ : str = resample if resample is not None else self.resample
lowercase_ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase_ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase_ : List[Any] = offset if offset is not None else self.offset
lowercase_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
lowercase_ : Any = image_mean if image_mean is not None else self.image_mean
lowercase_ : Optional[Any] = image_std if image_std is not None else self.image_std
lowercase_ : Optional[Any] = size if size is not None else self.size
lowercase_ : Dict = get_size_dict(snake_case__, default_to_square=snake_case__ )
lowercase_ : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
lowercase_ : Tuple = get_size_dict(snake_case__, param_name="""crop_size""" )
if not valid_images(snake_case__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
lowercase_ : Optional[Any] = make_batched(snake_case__ )
lowercase_ : Optional[Any] = [
[
self._preprocess_image(
image=snake_case__, do_resize=snake_case__, size=snake_case__, resample=snake_case__, do_center_crop=snake_case__, crop_size=snake_case__, do_rescale=snake_case__, rescale_factor=snake_case__, offset=snake_case__, do_normalize=snake_case__, image_mean=snake_case__, image_std=snake_case__, data_format=snake_case__, )
for img in video
]
for video in videos
]
lowercase_ : List[str] = {"""pixel_values""": videos}
        return BatchFeature(data=snake_case__, tensor_type=snake_case__ )
| 458 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class snake_case :
a_ : Dict = PegasusConfig
a_ : int = {}
a_ : Any = """gelu"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=40 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , ) ->Any:
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_labels
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = eos_token_id
a_ = pad_token_id
a_ = bos_token_id
def UpperCAmelCase__ ( self) ->Dict:
a_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size)
a_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1)
a_ = tf.concat([input_ids, eos_tensor] , axis=1)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
a_ = prepare_pegasus_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
return config, inputs_dict
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase) ->str:
a_ = TFPegasusModel(config=__UpperCAmelCase).get_decoder()
a_ = inputs_dict["input_ids"]
a_ = input_ids[:1, :]
a_ = inputs_dict["attention_mask"][:1, :]
a_ = inputs_dict["head_mask"]
a_ = 1
# first forward pass
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , head_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase)
a_ , a_ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
a_ = ids_tensor((self.batch_size, 3) , config.vocab_size)
a_ = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.inta)
# append to next input_ids and
a_ = tf.concat([input_ids, next_tokens] , axis=-1)
a_ = tf.concat([attention_mask, next_attn_mask] , axis=-1)
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase)[0]
a_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase)[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1])
# select random slice
a_ = int(ids_tensor((1,) , output_from_past.shape[-1]))
a_ = output_from_no_past[:, -3:, random_slice_idx]
a_ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-3)
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , ) ->int:
"""simple docstring"""
if attention_mask is None:
a_ = tf.cast(tf.math.not_equal(UpperCAmelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
a_ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
a_ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
a_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
a_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : Union[str, Any] = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
a_ : List[Any] = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
a_ : int = (
{
"""conversational""": TFPegasusForConditionalGeneration,
"""feature-extraction""": TFPegasusModel,
"""summarization""": TFPegasusForConditionalGeneration,
"""text2text-generation""": TFPegasusForConditionalGeneration,
"""translation""": TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
a_ : int = True
a_ : int = False
a_ : int = False
def UpperCAmelCase__ ( self) ->int:
a_ = TFPegasusModelTester(self)
a_ = ConfigTester(self , config_class=__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self) ->Optional[int]:
a_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__UpperCAmelCase)
@require_sentencepiece
@require_tokenizers
@require_tf
class snake_case ( unittest.TestCase ):
a_ : Optional[Any] = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
a_ : Dict = [
"""California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"""
""" reduce the risk of wildfires.""",
"""N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.""",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
a_ : str = """google/pegasus-xsum"""
@cached_property
def UpperCAmelCase__ ( self) ->Tuple:
return AutoTokenizer.from_pretrained(self.model_name)
@cached_property
def UpperCAmelCase__ ( self) ->Any:
a_ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
return model
def UpperCAmelCase__ ( self , **__UpperCAmelCase) ->List[str]:
a_ = self.translate_src_text(**__UpperCAmelCase)
assert self.expected_text == generated_words
def UpperCAmelCase__ ( self , **__UpperCAmelCase) ->Dict:
a_ = self.tokenizer(self.src_text , **__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors="tf")
a_ = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__UpperCAmelCase , )
a_ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__UpperCAmelCase)
return generated_words
@slow
def UpperCAmelCase__ ( self) ->str:
        self._assert_generated_batch_equal_expected()
| 210 |
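# Standalone inference sketch mirroring the integration test above. The
# checkpoint name comes from the test itself; `TFAutoModelForSeq2SeqLM` is
# the canonical spelling of the obfuscated `TFAutoModelForSeqaSeqLM` import.
from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
model = TFAutoModelForSeq2SeqLM.from_pretrained("google/pegasus-xsum")
batch = tokenizer(
    ["PG&E scheduled the blackouts in response to forecasts for high winds."],
    padding=True,
    return_tensors="tf",
)
summary_ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))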
"""simple docstring"""
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = '▁'
UpperCamelCase_ = {'vocab_file': 'prophetnet.tokenizer'}
UpperCamelCase_ = {
'vocab_file': {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'
),
}
}
UpperCamelCase_ = {
'microsoft/xprophetnet-large-wiki100-cased': {'do_lower_case': False},
}
UpperCamelCase_ = {
'microsoft/xprophetnet-large-wiki100-cased': 512,
}
def UpperCamelCase ( UpperCAmelCase ) ->int:
"""simple docstring"""
a_ = collections.OrderedDict()
with open(UpperCAmelCase , "r" , encoding="utf-8" ) as reader:
a_ = reader.readlines()
for index, token in enumerate(UpperCAmelCase ):
a_ = token.rstrip("\n" )
a_ = index
return vocab
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : List[str] = VOCAB_FILES_NAMES
a_ : Any = PRETRAINED_VOCAB_FILES_MAP
a_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Tuple = ["""input_ids""", """attention_mask"""]
def __init__( self , __UpperCAmelCase , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="[UNK]" , __UpperCAmelCase="[PAD]" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , __UpperCAmelCase = None , **__UpperCAmelCase , ) ->None:
a_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece")
raise
a_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(__UpperCAmelCase))
a_ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
a_ = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10):
a_ = F'''[unused{i}]'''
a_ = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
a_ = 12
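        # e.g. the spm id of "," is 3, so its embedding-vocab id is 3 + self.fairseq_offset = 15.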
a_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(__UpperCAmelCase)
def __getstate__( self) ->Union[str, Any]:
a_ = self.__dict__.copy()
a_ = None
return state
def __setstate__( self , __UpperCAmelCase) ->List[str]:
a_ = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece")
raise
# for backward compatibility
if not hasattr(self , "sp_model_kwargs"):
a_ = {}
a_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False) ->List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase)
if token_ids_a is None:
return ([0] * len(__UpperCAmelCase)) + [1]
return ([0] * len(__UpperCAmelCase)) + [1] + ([0] * len(__UpperCAmelCase)) + [1]
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = None) ->List[int]:
a_ = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def UpperCAmelCase__ ( self) ->Any:
return len(self.sp_model) + self.fairseq_offset
def UpperCAmelCase__ ( self) ->List[str]:
a_ = {self.convert_ids_to_tokens(__UpperCAmelCase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->str:
return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase)
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Union[str, Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
a_ = self.sp_model.PieceToId(__UpperCAmelCase)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Optional[Any]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Dict:
a_ = "".join(__UpperCAmelCase).replace(__UpperCAmelCase , " ").strip()
return out_string
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = None) ->Tuple[str]:
if not os.path.isdir(__UpperCAmelCase):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
return
a_ = os.path.join(
__UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(__UpperCAmelCase) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , __UpperCAmelCase)
elif not os.path.isfile(self.vocab_file):
with open(__UpperCAmelCase , "wb") as fi:
a_ = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase)
return (out_vocab_file,)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = None) ->List[int]:
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
a_ = [self.sep_token_id]
        return token_ids_a + sep + token_ids_a + sep
| 210 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
__magic_name__ : Optional[int] = logging.get_logger(__name__)
class lowercase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self , *_A , **_A ):
'''simple docstring'''
warnings.warn(
"""The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DeiTImageProcessor instead.""" , _A , )
super().__init__(*_A , **_A )
| 102 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    """Extract the class label from a filename such as `great_pyrenees_173.jpg`."""
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class lowerCamelCase__ ( UpperCAmelCase_):
"""simple docstring"""
def __init__(self , __a , __a=None , __a=None ):
'''simple docstring'''
lowerCamelCase = file_names
lowerCamelCase = image_transform
lowerCamelCase = label_to_id
def __len__(self ):
'''simple docstring'''
return len(self.file_names )
def __getitem__(self , __a ):
'''simple docstring'''
lowerCamelCase = self.file_names[idx]
lowerCamelCase = PIL.Image.open(__a )
lowerCamelCase = raw_image.convert("RGB" )
if self.image_transform is not None:
lowerCamelCase = self.image_transform(__a )
lowerCamelCase = extract_label(__a )
if self.label_to_id is not None:
lowerCamelCase = self.label_to_id[label]
return {"image": image, "label": label}
def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)
    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None
    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]
    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}
    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]
    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )
    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)
    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True
    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)
    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)
    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()
        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps", type=str, default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", )
    parser.add_argument(
        "--output_dir", type=str, default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
    parser.add_argument(
        "--resume_from_checkpoint", type=str, default=None,
        help="If the training should continue from a checkpoint folder.", )
    parser.add_argument(
        "--with_tracking", action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.", )
    parser.add_argument(
        "--project_dir", type=str, default="logs",
        help="Location on where to store experiment tracking logs` and relevant project information", )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
    main()
| 623 | 0 |
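# A quick sanity check on the labeling logic used in the training script above.
# This is a self-contained sketch; the filenames are made up for illustration.
import os
import re

def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]

files = ["beagle_12.jpg", "persian_7.jpg", "beagle_3.jpg"]
id_to_label = sorted(set(extract_label(f) for f in files))
label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}
print(label_to_id)  # {'beagle': 0, 'persian': 1}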
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
__lowercase = """__test_patch_submodule_mock__"""
with patch_submodule(_test_patching , """os.path.join""" , __UpperCamelCase ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    assert _test_patching.open is open
    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open
def test_patch_submodule_missing():
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass
def test_patch_submodule_missing_builtin():
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
| 716 |
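# A minimal, hedged sketch of the semantics the tests above exercise, assuming
# `patch_submodule` accepts any module-like object that has "imported os".
# The throwaway module below stands in for the `_test_patching` fixture.
import os
import types

from datasets.utils.patching import patch_submodule

fixture = types.ModuleType("fixture")
fixture.os = os  # mimics an `import os` at the fixture's top level

sentinel = "__sentinel__"
with patch_submodule(fixture, "os.path.join", sentinel):
    assert fixture.os.path.join is sentinel   # patched inside the context
    assert fixture.os.rename is os.rename     # unrelated attributes untouched
assert fixture.os.path.join is os.path.join   # restored afterwards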
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def map(dataset: datasets.Dataset, **kwargs):  # intentionally shadows the built-in, as the calls below expect
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):  # intentionally shadows the built-in
    _ = dataset.filter(**kwargs)
def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )
        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)
        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 339 | 0 |
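# `get_duration` comes from the benchmark's local `utils` module, which is not
# shown in this excerpt. A plausible minimal implementation (an assumption, not
# the actual upstream code) simply times the wrapped call:
import functools
import timeit

def get_duration(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = timeit.default_timer()
        func(*args, **kwargs)
        return timeit.default_timer() - start  # elapsed seconds
    return wrapper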
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Stand-in so the type hints below don't fail when vision deps are absent."""

        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass
@require_tf
@unittest.skip("Image segmentation not implemented in TF" )
    def test_small_model_tf(self):
        pass
    @slow
    @require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_21},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
{"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.99_67},
{"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.9_93},
{"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.99_09},
{"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.98_79},
{"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.98_34},
{"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.97_16},
{"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.96_12},
{"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.95_99},
{"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.95_52},
{"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.95_32},
{"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.95_16},
{"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.94_99},
{"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.94_83},
{"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.94_64},
{"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.9_43},
{"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.9_43},
{"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.94_08},
{"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.93_35},
{"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.93_26},
{"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.92_62},
{"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.89_99},
{"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.89_86},
{"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.89_84},
{"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.88_73},
{"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.88_71}
] ,)
# fmt: on
    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.02_10},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
            ],
        )
| 46 |
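# What the `mask_to_test_readable` helper above produces for a dummy mask; the
# hash value itself depends on the raw pixel bytes, so only its length is checked.
import numpy as np
from PIL import Image

dummy_mask = Image.fromarray(np.zeros((480, 640), dtype=np.uint8))
readable = mask_to_test_readable(dummy_mask)
assert readable["shape"] == (480, 640)
assert len(readable["hash"]) == 10  # first 10 hex chars of the md5 digest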
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a_ = {
'configuration_bridgetower': [
'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BridgeTowerConfig',
'BridgeTowerTextConfig',
'BridgeTowerVisionConfig',
],
'processing_bridgetower': ['BridgeTowerProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['BridgeTowerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST',
'BridgeTowerForContrastiveLearning',
'BridgeTowerForImageAndTextRetrieval',
'BridgeTowerForMaskedLM',
'BridgeTowerModel',
'BridgeTowerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 296 | 0 |
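# The `_LazyModule` assignment above defers the heavy model imports until an
# attribute is first accessed. A minimal sketch of the same idea in plain
# Python (PEP 562 module-level __getattr__); the submodule names are made up:
import importlib

_import_structure = {"heavy_submodule": ["HeavyClass"]}

def __getattr__(name):
    for module_name, attrs in _import_structure.items():
        if name in attrs:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")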
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs),
    )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=1,
        help="The number of minibatches to be run before gradients are accumulated.", )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD")
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 710 |
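# Conceptually, Local SGD lets each worker take K independent optimizer steps
# and then averages model parameters across workers, instead of all-reducing
# gradients on every step. A rough sketch of that synchronization step (not the
# actual accelerate implementation):
import torch
import torch.distributed as dist

def average_model_parameters(model: torch.nn.Module, world_size: int) -> None:
    """All-reduce and average parameters across workers."""
    for param in model.parameters():
        dist.all_reduce(param.data, op=dist.ReduceOp.SUM)
        param.data /= world_size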
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 2540529) < 10
| 28 | 0 |
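# The inference pattern exercised by `full_loop` above, as a standalone hedged
# sketch; the zero tensor stands in for a real denoising model's output.
import torch
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    residual = torch.zeros_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step(residual, t, sample).prev_sample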
from __future__ import annotations
def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """Returns shortest paths from the source vertex to every other vertex."""
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
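
# A programmatic worked example (the interactive driver below asks for the same
# data via input()). Graph: 0 --2--> 1, 0 --4--> 2, 1 --1--> 2, 1 --2--> 3, 2 --3--> 4.
example_graph = [
    {"src": 0, "dst": 1, "weight": 2},
    {"src": 0, "dst": 2, "weight": 4},
    {"src": 1, "dst": 2, "weight": 1},
    {"src": 1, "dst": 3, "weight": 2},
    {"src": 2, "dst": 4, "weight": 3},
]
assert bellman_ford(example_graph, 5, 5, 0) == [0.0, 2.0, 3.0, 4.0, 6.0]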
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())
    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x) for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 105 |
'''simple docstring'''
def heaps(arr: list) -> list:
    """Iterative Heap's algorithm: returns all permutations of a list as tuples."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))
        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res
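
# Quick check of the function above: Heap's algorithm emits each permutation by
# a single adjacent swap, so a 3-element list yields exactly these 6 tuples.
assert heaps([1, 2, 3]) == [
    (1, 2, 3), (2, 1, 3), (3, 1, 2),
    (1, 3, 2), (2, 3, 1), (3, 2, 1),
]
assert len(heaps(list(range(4)))) == 24  # n! permutations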
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
| 578 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import (
        AutoProcessor,
        BertTokenizerFast,
        BlipImageProcessor,
        GPT2Tokenizer,
        InstructBlipProcessor,
        PreTrainedTokenizerFast,
    )
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)
        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])
        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )
| 665 |
'''simple docstring'''
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class MultiTPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"\n        {self.test_dir}/xla_spawn.py\n        --num_cores 8\n        {self.test_file_path}\n        ".split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
| 665 | 1 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}
def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, unk_token="<unk>", cls_token="<cls>", pad_token="<pad>",
                 mask_token="<mask>", eos_token="<eos>", **kwargs):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)
    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
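
# Hedged usage sketch for the tokenizer above. Because every vocabulary entry is
# registered as a no-split token, a raw protein string should be split
# residue-by-residue before `_tokenize` is ever consulted; the expected output
# below assumes the standard ESM special tokens.
from transformers import EsmTokenizer

tokenizer = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
encoded = tokenizer("MKTAYIA")
tokens = tokenizer.convert_ids_to_tokens(encoded["input_ids"])
print(tokens)  # expected: ['<cls>', 'M', 'K', 'T', 'A', 'Y', 'I', 'A', '<eos>']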
| 222 |
"""simple docstring"""
from string import ascii_uppercase
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}  # letter -> index
dict2 = dict(enumerate(ascii_uppercase))  # index -> letter
def generate_key(message: str, key: str) -> str:
    """Repeats the key until it is as long as the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key
def cipher_text(message: str, key_new: str) -> str:
    encrypted = ""
    i = 0
    for letter in message:
        if letter == " ":
            encrypted += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            encrypted += dict2[x]
    return encrypted
def original_text(cipher_text: str, key_new: str) -> str:
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt
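
# Round-trip sanity check: decryption inverts encryption because both are
# mod-26 shifts with the same generated key (spaces are passed through).
msg = "HELLO WORLD"
k = generate_key(msg, "KEY")
assert original_text(cipher_text(msg, k), k) == msg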
def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
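# A minimal round-trip check (helper names as defined above; the sample string
# is illustrative only):
# key_new = generate_key("HELLO WORLD", "KEY")
# assert original_text(cipher_text("HELLO WORLD", key_new), key_new) == "HELLO WORLD"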
| 222 | 1 |
"""simple docstring"""
deps = {
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
| 598 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve():
    '''simple docstring'''
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1E10):
    '''simple docstring'''
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
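# Why 2 * prime * n is the remainder (an explanatory aside, not from the
# source): expanding (p - 1)**n + (p + 1)**n with the binomial theorem, every
# term containing p**2 or higher vanishes mod p**2; for odd n the constant
# terms cancel and the linear terms sum to 2*n*p. Quick check for p = 7, n = 3:
# ((7 - 1) ** 3 + (7 + 1) ** 3) % 7**2 == (2 * 3 * 7) % 7**2  # both equal 42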
| 598 | 1 |
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
def __init__( self : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any=1_3 , __lowerCAmelCase : Optional[int]=7 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : str=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Any=False , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Optional[int]=9_9 , __lowerCAmelCase : Dict=0 , __lowerCAmelCase : List[str]=3_2 , __lowerCAmelCase : Optional[int]=5 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : Union[str, Any]=5_1_2 , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : List[Any]="last" , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Any=None , __lowerCAmelCase : Any=0 , ):
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_lengths
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = gelu_activation
__snake_case = sinusoidal_embeddings
__snake_case = causal
__snake_case = asm
__snake_case = n_langs
__snake_case = vocab_size
__snake_case = n_special
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = num_choices
__snake_case = summary_type
__snake_case = use_proj
__snake_case = scope
__snake_case = bos_token_id
def lowercase__ ( self : str ):
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case = None
if self.use_input_lengths:
__snake_case = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__snake_case = None
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case = ids_tensor([self.batch_size] , 2 ).float()
__snake_case = ids_tensor([self.batch_size] , self.num_choices )
__snake_case = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowercase__ ( self : Union[str, Any] ):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def lowercase__ ( self : Any , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , ):
__snake_case = XLMModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__snake_case = model(__lowerCAmelCase , lengths=__lowerCAmelCase , langs=__lowerCAmelCase )
__snake_case = model(__lowerCAmelCase , langs=__lowerCAmelCase )
__snake_case = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , ):
__snake_case = XLMWithLMHeadModel(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__snake_case = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : int , ):
__snake_case = XLMForQuestionAnsweringSimple(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__snake_case = model(__lowerCAmelCase )
__snake_case = model(__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase )
__snake_case = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , ):
__snake_case = XLMForQuestionAnswering(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__snake_case = model(__lowerCAmelCase )
__snake_case = model(
__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , cls_index=__lowerCAmelCase , is_impossible=__lowerCAmelCase , p_mask=__lowerCAmelCase , )
__snake_case = model(
__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , cls_index=__lowerCAmelCase , is_impossible=__lowerCAmelCase , )
(__snake_case) = result_with_labels.to_tuple()
__snake_case = model(__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase )
(__snake_case) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def lowercase__ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , ):
__snake_case = XLMForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__snake_case = model(__lowerCAmelCase )
__snake_case = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str , ):
__snake_case = self.num_labels
__snake_case = XLMForTokenClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__snake_case = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , ):
__snake_case = self.num_choices
__snake_case = XLMForMultipleChoice(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__snake_case = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : Tuple ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)
def lowercase__ ( self : int ):
self.config_tester.run_common_tests()
def lowercase__ ( self : Tuple ):
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*__lowerCAmelCase )
def lowercase__ ( self : List[str] ):
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*__lowerCAmelCase )
def lowercase__ ( self : int ):
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*__lowerCAmelCase )
def lowercase__ ( self : int ):
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*__lowerCAmelCase )
def lowercase__ ( self : List[Any] ):
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*__lowerCAmelCase )
def lowercase__ ( self : str ):
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*__lowerCAmelCase )
def lowercase__ ( self : Any ):
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*__lowerCAmelCase )
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(
[isinstance(__lowerCAmelCase , __lowerCAmelCase ) for iter_attentions in attentions] , [True] * len(__lowerCAmelCase ) )
self.assertEqual(len(__lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(__lowerCAmelCase ):
# adds PAD dummy token
__snake_case = min_length + idx + 1
__snake_case = min_length + idx + 1
__snake_case = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__lowerCAmelCase ) )
    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(
[isinstance(__lowerCAmelCase , __lowerCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(__lowerCAmelCase ) , )
self.assertEqual(len(__lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(__lowerCAmelCase ):
# adds PAD dummy token
__snake_case = min_length + idx + 1
__snake_case = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__lowerCAmelCase ) , )
pass
@slow
def lowercase__ ( self : List[str] ):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
@slow
def lowercase__ ( self : Any ):
        model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 356 |
"""simple docstring"""
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start: str, goal: str) -> list:
    '''simple docstring'''
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start: str, target: str) -> int:
    '''simple docstring'''
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
print(bfs_shortest_path(demo_graph, "G", "D")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, "G", "D")) # returns 4
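# Performance aside (not part of the original snippet): list.pop(0) is O(n);
# for larger graphs, collections.deque keeps each dequeue O(1), e.g.
# from collections import deque
# queue = deque([[start]]); path = queue.popleft()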
| 608 | 0 |
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    '''simple docstring'''
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)
    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)
    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))
    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        '''simple docstring'''
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()
    get_runner_status(args.target_runners, args.token)
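# Example invocation (the script file name here is hypothetical; the token
# needs the actions:read scope on the repository):
#   python check_offline_runners.py --target_runners runner-a,runner-b --token "$GH_TOKEN"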
| 701 |
deps = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 693 | 0 |
'''simple docstring'''
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 51 |
"""simple docstring"""
from itertools import count
def solution(min_block_length: int = 50) -> int:
    '''simple docstring'''
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n
if __name__ == "__main__":
print(f"""{solution() = }""")
| 535 | 0 |
'''simple docstring'''
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / sigma, ndigits) for x in data]
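# Example values (computed by hand for this sketch):
# normalization([2, 7, 10, 20, 30, 50]) -> [0.0, 0.104, 0.167, 0.375, 0.583, 1.0]
# standardization instead rescales the data to zero mean and unit variance.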
| 617 |
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}


class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`')
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`')
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
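# A minimal construction sketch (argument values are illustrative only, not
# taken from the source):
# config = AutoformerConfig(prediction_length=24, context_length=48, num_time_features=2)
# config.feature_size  # input_size * len(lags_sequence) plus static/time features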
| 617 | 1 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@require_ftfy
def __A ( self : Optional[Any] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
A_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
A_ = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
A_ = tokenizer_s.tokenize(UpperCAmelCase )
A_ = tokenizer_r.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
A_ = "xa\u0303y" + " " + "x\xe3y"
A_ = tokenizer_s.tokenize(UpperCAmelCase )
A_ = tokenizer_r.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
# Test that the tokenization is identical on unicode of space type
A_ = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
A_ = tokenizer_s.tokenize(UpperCAmelCase )
A_ = tokenizer_r.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
# Test that the tokenization is identical on unicode of line break type
A_ = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
A_ = tokenizer_s.tokenize(UpperCAmelCase )
A_ = tokenizer_r.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def __A ( self : Optional[Any] ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A_ = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
A_ = f'''{text_of_1_token} {text_of_1_token}'''
A_ = self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase , use_fast=UpperCAmelCase , )
A_ = tokenizer_r(UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCAmelCase ) + 1, len(UpperCAmelCase ) + 1 + len(UpperCAmelCase )) , )
A_ = f''' {text}'''
A_ = self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase , use_fast=UpperCAmelCase , )
A_ = tokenizer_r(UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCAmelCase ) + 1, 1 + len(UpperCAmelCase ) + 1 + len(UpperCAmelCase )) , )
def __A ( self : List[str] ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(UpperCAmelCase ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def __A ( self : Dict ):
super().test_tokenization_python_rust_equals()
def __A ( self : List[Any] ):
# CLIP always lower cases letters
        pass
| 86 |
def max_product_subarray(numbers: list) -> int:
    '''simple docstring'''
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError('numbers must be an iterable of integers')
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
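# Quick examples (computed by hand for this sketch):
# max_product_subarray([2, 3, -2, 4]) -> 6   (subarray [2, 3])
# max_product_subarray([-2, 0, -1])   -> 0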
| 205 | 0 |
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    '''simple docstring'''
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    '''simple docstring'''
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    '''simple docstring'''
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    '''simple docstring'''
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    '''simple docstring'''
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    '''simple docstring'''
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
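# Tiny illustration of the MinHash estimate this pipeline relies on (a sketch
# using the datasketch API imported above; the estimate is approximate):
# m1, m2 = MinHash(num_perm=NUM_PERM), MinHash(num_perm=NUM_PERM)
# for t in "def foo a b".split():
#     m1.update(t.encode())
# for t in "def foo a c".split():
#     m2.update(t.encode())
# m1.jaccard(m2)  # ~0.6, close to the exact token Jaccard of 3/5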
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    '''simple docstring'''
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    '''simple docstring'''
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85):
    '''simple docstring'''
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
| 718 |
'''simple docstring'''
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"
DATASET_LOADING_SCRIPT_CODE = '''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def dataset_loading_script_name():
    '''simple docstring'''
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    '''simple docstring'''
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    '''simple docstring'''
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
| 686 | 0 |
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
"""simple docstring"""
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
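    # Illustrative call (hand-checked against kT/q ~ 25.85 mV at 300 K):
    # builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1e10)
    # -> ~0.833 V, i.e. 0.02585 * ln(1e34 / 1e20)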
| 15 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)


class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.ndarray) -> np.ndarray:
        '''simple docstring'''
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)
        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs, num_mel_filters=self.feature_size, min_frequency=0.0, max_frequency=self.sampling_rate / 2.0, sampling_rate=self.sampling_rate, )
        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale, window=window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, center=False, preemphasis=self.preemphasis_coeff, mel_filters=fbanks, mel_floor=self.mel_floor, log_mel="log", )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        '''simple docstring'''
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features, attention_mask=None) -> List[np.ndarray]:
        '''simple docstring'''
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ) -> BatchFeature:
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})
        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=True, **kwargs, )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask)
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
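# Rough usage sketch for the extractor above (synthetic audio; the frame count
# is an assumption, not verified output):
# import numpy as np
# extractor = MCTCTFeatureExtractor(feature_size=80, sampling_rate=16000)
# feats = extractor(np.zeros(16000, dtype=np.float32), sampling_rate=16000, return_tensors="np")
# feats["input_features"].shape  # roughly (1, ~100 frames at a 10 ms hop, 80)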
| 238 | 0 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class lowerCAmelCase_ ( lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : bool = field(default=lowerCAmelCase , metadata={"""help""": """Whether to use SortishSampler or not."""} )
_lowerCAmelCase : bool = field(
default=lowerCAmelCase , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
_lowerCAmelCase : Optional[int] = field(
default=lowerCAmelCase , metadata={
"""help""": (
"""The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `max_length` value of the model configuration."""
)
} , )
_lowerCAmelCase : Optional[int] = field(
default=lowerCAmelCase , metadata={
"""help""": (
"""The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `num_beams` value of the model configuration."""
)
} , )
_lowerCAmelCase : Optional[Union[str, Path, GenerationConfig]] = field(
default=lowerCAmelCase , metadata={
"""help""": """Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."""
} , )
    def to_dict(self):
        """simple docstring"""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
| 704 |
"""simple docstring"""
import math
def perfect_square(num: int) -> bool:
    """simple docstring"""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """simple docstring"""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
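    # Quick checks (hand-verified for this sketch):
    # perfect_square_binary_search(16) -> True
    # perfect_square_binary_search(17) -> False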
| 104 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(f"{prefix}cls_token", "beit.embeddings.cls_token"),
(f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
(f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
(f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , has_lm_head=False , is_semantic=False ):
    for i in range(config.num_hidden_layers ):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight" )
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias" )
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias" )
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1" )
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2" )
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dit_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub=False ):
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True , use_mask_token=has_lm_head )
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["model"]
    rename_keys = create_rename_keys(config , has_lm_head=has_lm_head )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , has_lm_head=has_lm_head )
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config ) if has_lm_head else BeitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=False )
    image = prepare_img()
    encoding = image_processor(images=image , return_tensors="pt" )
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values )
    logits = outputs.logits
    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape ), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"Saving model to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=True , )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=True , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth',
type=str,
help='URL to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
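# Illustrative invocation (not part of the original script; the script file name and the
# output folder are assumptions):
# python convert_dit_to_pytorch.py \
#     --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#     --pytorch_dump_folder_path ./dit-base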
| 649 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_efficientnet': [
'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientNetConfig',
'EfficientNetOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_efficientnet'] = ['EfficientNetImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_efficientnet'] = [
'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientNetForImageClassification',
'EfficientNetModel',
'EfficientNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
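# Usage sketch (illustrative note, not part of the module): with the lazy structure above,
# `from transformers.models.efficientnet import EfficientNetModel` defers the heavy torch
# import until the attribute is first resolved by _LazyModule.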
| 649 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 714 |
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer( CLIPTokenizer ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        self.token_map = {}

    def try_adding_tokens( self , placeholder_token , *args , **kwargs ):
        num_added_tokens = super().add_tokens(placeholder_token , *args , **kwargs )
        if num_added_tokens == 0:
            raise ValueError(
                F'''The tokenizer already contains the token {placeholder_token}. Please pass a different'''
                ' `placeholder_token` that is not already in the tokenizer.' )

    def add_placeholder_tokens( self , placeholder_token , *args , num_vec_per_token=1 , **kwargs ):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token , *args , **kwargs )
            output.append(placeholder_token )
        else:
            output = []
            for i in range(num_vec_per_token ):
                ith_token = placeholder_token + F'''_{i}'''
                self.try_adding_tokens(ith_token , *args , **kwargs )
                output.append(ith_token )
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    F'''The tokenizer already has placeholder token {token} that can get confused with'''
                    F''' {placeholder_token}. Keep placeholder tokens independent.''' )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text( self , text , vector_shuffle=False , prop_tokens_to_load=1.0 ):
        if isinstance(text , list ):
            output = []
            for i in range(len(text ) ):
                output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=vector_shuffle ) )
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens ) * prop_tokens_to_load )]
                if vector_shuffle:
                    tokens = copy.copy(tokens )
                    random.shuffle(tokens )
                text = text.replace(placeholder_token , ' '.join(tokens ) )
        return text

    def __call__( self , text , *args , vector_shuffle=False , prop_tokens_to_load=1.0 , **kwargs ):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text , vector_shuffle=vector_shuffle , prop_tokens_to_load=prop_tokens_to_load ) , *args , **kwargs , )

    def encode( self , text , *args , vector_shuffle=False , prop_tokens_to_load=1.0 , **kwargs ):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text , vector_shuffle=vector_shuffle , prop_tokens_to_load=prop_tokens_to_load ) , *args , **kwargs , )
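# Illustrative usage sketch (not part of the original file; the checkpoint id and the
# placeholder string are assumptions):
# tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
# tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
# input_ids = tokenizer("a photo of <cat-toy>", vector_shuffle=True)["input_ids"]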
| 585 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_electra_fast'''] = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_electra'''] = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_electra'''] = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_electra'''] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 645 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_flax_xlm_roberta_base( self ):
        '''simple docstring'''
        model = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base' )
        text = 'The dog is cute and lives in the garden house'
        input_ids = jnp.array([tokenizer.encode(text )] )
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
        output = model(input_ids )['last_hidden_state']
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1E-3 ) )
| 232 | 0 |
def bead_sort( sequence ) -> list:
    if any(not isinstance(x , int ) or x < 0 for x in sequence ):
        raise TypeError("""Sequence must be list of non-negative integers""" )
    for _ in range(len(sequence ) ):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence , sequence[1:] ) ):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
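    # Extra illustrative check (not in the original file): the result should agree with
    # sorted() for any list of non-negative integers.
    assert bead_sort([0, 3, 1, 2]) == sorted([0, 3, 1, 2])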
| 701 |
from __future__ import annotations
import numpy as np
def lower_upper_decomposition( table ) -> tuple[np.ndarray, np.ndarray]:
    rows, columns = np.shape(table )
    if rows != columns:
        msg = (
            """'table' has to be of square shaped array but got a """
            F"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg )
    lower = np.zeros((rows, columns) )
    upper = np.zeros((rows, columns) )
    for i in range(columns ):
        for j in range(i ):
            total = sum(lower[i][k] * upper[k][j] for k in range(j ) )
            if upper[j][j] == 0:
                raise ArithmeticError("""No LU decomposition exists""" )
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i , columns ):
            total = sum(lower[i][k] * upper[k][j] for k in range(i ) )
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
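    # Illustrative round-trip check (matrix values are assumptions, not from the original file):
    table = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(table)
    assert np.allclose(lower @ upper, table)  # holds when no pivoting is required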
| 446 | 0 |
"""simple docstring"""
import math
def main():
    """simple docstring"""
    message = input("""Enter message: """ )
    key = int(input(f"""Enter key [2-{len(message ) - 1}]: """ ) )
    mode = input("""Encryption/Decryption [e/d]: """ )
    if mode.lower().startswith("""e""" ):
        text = encrypt_message(key , message )
    elif mode.lower().startswith("""d""" ):
        text = decrypt_message(key , message )
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"""Output:\n{text + '|'}""" )


def encrypt_message( key , message ):
    """simple docstring"""
    cipher_text = [""""""] * key
    for col in range(key ):
        pointer = col
        while pointer < len(message ):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text )


def decrypt_message( key , message ):
    """simple docstring"""
    num_cols = math.ceil(len(message ) / key )
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message )
    plain_text = [""""""] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 644 |
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
__lowerCAmelCase : int = logging.get_logger(__name__) # pylint: disable=invalid-name
class PaintByExampleImageEncoder( CLIPPreTrainedModel ):
    def __init__( self , config , proj_size=768 ):
        super().__init__(config )
        self.proj_size = proj_size
        self.model = CLIPVisionModel(config )
        self.mapper = PaintByExampleMapper(config )
        self.final_layer_norm = nn.LayerNorm(config.hidden_size )
        self.proj_out = nn.Linear(config.hidden_size , self.proj_size )
        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )

    def forward( self , pixel_values , return_uncond_vector=False ):
        clip_output = self.model(pixel_values=pixel_values )
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None] )
        latent_states = self.final_layer_norm(latent_states )
        latent_states = self.proj_out(latent_states )
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states


class PaintByExampleMapper( nn.Module ):
    def __init__( self , config ):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size , num_heads , hid_size // num_heads , activation_fn="""gelu""" , attention_bias=True )
                for _ in range(num_layers )
            ] )

    def forward( self , hidden_states ):
        for block in self.blocks:
            hidden_states = block(hidden_states )
        return hidden_states
| 644 | 1 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components( self ):
"""simple docstring"""
return self._get_superresolution_dummy_components()
    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 16, 16) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'original_image': original_image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        """simple docstring"""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )

    def test_save_load_optional_components( self ):
        """simple docstring"""
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
    def test_save_load_floataa( self ):
        """simple docstring"""
        super().test_save_load_floataa(expected_max_diff=1e-1 )

    def test_attention_slicing_forward_pass( self ):
        """simple docstring"""
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )

    def test_save_load_local( self ):
        """simple docstring"""
        self._test_save_load_local()

    def test_inference_batch_single_identical( self ):
        """simple docstring"""
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2 , )
| 170 |
from __future__ import annotations
def find_max( nums , left , right ) -> int | float:
    if len(nums ) == 0:
        raise ValueError('find_max() arg is an empty sequence' )
    if (
        left >= len(nums )
        or left < -len(nums )
        or right >= len(nums )
        or right < -len(nums )
    ):
        raise IndexError('list index out of range' )
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums , left , mid )  # find max in range[left, mid]
    right_max = find_max(nums , mid + 1 , right )  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
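    # Illustrative check (not in the original file): `left` and `right` are inclusive indices.
    assert find_max([2, 8, 3, 1], 0, 3) == 8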
| 170 | 1 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A_ ( __UpperCamelCase ):
'''simple docstring'''
__snake_case = ["""image_processor""", """tokenizer"""]
__snake_case = """AutoImageProcessor"""
__snake_case = """AutoTokenizer"""
    def __init__( self , image_processor , tokenizer ):
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        return ["input_ids", "attention_mask", "pixel_values"]
| 669 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared( vector: ndarray ):
    """simple docstring"""
    return np.dot(vector , vector )
class SVC:
    """simple docstring"""

    def __init__( self , *,
        regularization = np.inf , kernel = "linear" , gamma = 0.0 , ) -> None:
        """simple docstring"""
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError('''rbf kernel requires gamma''' )
            if not isinstance(self.gamma , (float, int) ):
                raise ValueError('''gamma must be float or int''' )
            if not self.gamma > 0:
                raise ValueError('''gamma must be > 0''' )
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f'''Unknown kernel: {kernel}'''
            raise ValueError(msg )

    def __linear( self , vector_a , vector_b ) -> float:
        """simple docstring"""
        return np.dot(vector_a , vector_b )

    def __rbf( self , vector_a , vector_b ) -> float:
        """simple docstring"""
        return np.exp(-(self.gamma * norm_squared(vector_a - vector_b )) )

    def fit( self , observations , classes ) -> None:
        """simple docstring"""
        self.observations = observations
        self.classes = classes
        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes )

        def to_minimize(candidate ) -> float:
            s = 0
            (n,) = np.shape(candidate )
            for i in range(n ):
                for j in range(n ):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i] , observations[j] )
                    )
            return 1 / 2 * s - sum(candidate )

        ly_contraint = LinearConstraint(classes , 0 , 0 )
        l_bounds = Bounds(0 , self.regularization )
        l_star = minimize(
            to_minimize , np.ones(n ) , bounds=l_bounds , constraints=[ly_contraint] ).x
        self.optimum = l_star
        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n ):
            for j in range(n ):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i] , observations[j] )
        self.offset = s / n

    def predict( self , observation ) -> int:
        """simple docstring"""
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n] , observation )
            for n in range(len(self.classes ) ) )
        return 1 if s + self.offset >= 0 else -1
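# Illustrative usage sketch (not part of the original file; the data below is made up):
# xs = [np.array([0.0, 1.0]), np.array([1.0, 1.0]), np.array([1.0, 3.0])]
# ys = np.asarray([1, 1, -1])
# svc = SVC(kernel="linear")
# svc.fit(xs, ys)                     # solves the Wolfe dual described above
# svc.predict(np.array([0.0, 1.0]))   # -> +1 or -1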
if __name__ == "__main__":
import doctest
doctest.testmod()
| 381 | 0 |
import requests
from bsa import BeautifulSoup
def get_citation( base_url , params ):
    """simple docstring"""
    soup = BeautifulSoup(requests.get(base_url , params=params ).content , 'html.parser' )
    div = soup.find('div' , attrs={'class': 'gs_ri'} )
    anchors = div.find('div' , attrs={'class': 'gs_fl'} ).find_all('a' )
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 20_18,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 353 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , config_file , pytorch_dump_path ):
    """simple docstring"""
    config = TaConfig.from_json_file(config_file )
    print(f"Building PyTorch model from configuration: {config}" )
    model = TaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}" )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 353 | 1 |
"""simple docstring"""
import os
from distutils.util import strtobool
def get_int_from_env( env_keys , default ):
    '''simple docstring'''
    for e in env_keys:
        val = int(os.environ.get(e , -1 ) )
        if val >= 0:
            return val
    return default


def parse_flag_from_env( key , default=False ):
    '''simple docstring'''
    value = os.environ.get(key , str(default ) )
    return strtobool(value ) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env( key , default="no" ):
    '''simple docstring'''
    value = os.environ.get(key , str(default ) )
    return value
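# Illustrative usage (the environment variable names are assumptions, not from the original file):
# world_size = get_int_from_env(["WORLD_SIZE", "MPI_WORLD_SIZE"], 1)
# debug = parse_flag_from_env("ACCELERATE_DEBUG_MODE", False)
# precision = parse_choice_from_env("MIXED_PRECISION", "no")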
| 7 |
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance( emb_a ,emb_b ,eps=1e-1_2 ):
    norm_emb_a = jnp.divide(emb_a.T ,jnp.clip(jnp.linalg.norm(emb_a ,axis=1 ) ,a_min=eps ) ).T
    norm_emb_b = jnp.divide(emb_b.T ,jnp.clip(jnp.linalg.norm(emb_b ,axis=1 ) ,a_min=eps ) ).T
    return jnp.matmul(norm_emb_a ,norm_emb_b.T )
class FlaxStableDiffusionSafetyCheckerModule( nn.Module ):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.floataa

    def setup( self ):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim , use_bias=False , dtype=self.dtype)
        self.concept_embeds = self.param('''concept_embeds''' , jax.nn.initializers.ones , (1_7, self.config.projection_dim))
        self.special_care_embeds = self.param(
            '''special_care_embeds''' , jax.nn.initializers.ones , (3, self.config.projection_dim))
        self.concept_embeds_weights = self.param('''concept_embeds_weights''' , jax.nn.initializers.ones , (1_7,))
        self.special_care_embeds_weights = self.param('''special_care_embeds_weights''' , jax.nn.initializers.ones , (3,))

    def __call__( self , clip_input ):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = jax_cosine_distance(image_embeds , self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds , self.concept_embeds)
        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores , 3)
        is_special_care = jnp.any(special_scores > 0 , axis=1 , keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01
        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores , 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0 , axis=1)
        return has_nsfw_concepts


class FlaxStableDiffusionSafetyChecker( FlaxPreTrainedModel ):
    config_class = CLIPConfig
    main_input_name = '''clip_input'''
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__( self , config: CLIPConfig , input_shape: Optional[Tuple] = None , seed: int = 0 , dtype: jnp.dtype = jnp.floataa , _do_init: bool = True , **kwargs , ):
        if input_shape is None:
            input_shape = (1, 2_2_4, 2_2_4, 3)
        module = self.module_class(config=config , dtype=dtype , **kwargs)
        super().__init__(config , module , input_shape=input_shape , seed=seed , dtype=dtype , _do_init=_do_init)

    def init_weights( self , rng: jax.random.KeyArray , input_shape: Tuple , params: FrozenDict = None ):
        # init input tensor
        pixel_values = jax.random.normal(rng , input_shape)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {'''params''': params_rng, '''dropout''': dropout_rng}
        random_params = self.module.init(rngs , pixel_values)['''params''']
        return random_params

    def __call__( self , clip_input , params: dict = None , ):
        clip_input = jnp.transpose(clip_input , (0, 2, 3, 1))
        return self.module.apply(
            {'''params''': params or self.params} , jnp.array(clip_input , dtype=jnp.floataa) , rngs={} , )
| 171 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , """w""" ) as fp:
            fp.write("""\n""".join(merges ) )
    def get_input_output_texts( self , tokenizer ):
        input_text = """lower newer"""
        output_text = """lower newer"""
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = XLMTokenizer(self.vocab_file , self.merges_file )
        text = """lower"""
        bpe_tokens = ["""low""", """er</w>"""]
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ["""<unk>"""]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
@slow
    def test_sequence_builders( self ):
        tokenizer = XLMTokenizer.from_pretrained("""xlm-mlm-en-2048""" )
        text = tokenizer.encode("""sequence builders""" , add_special_tokens=False )
        text_a = tokenizer.encode("""multi-sequence build""" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
| 631 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
_A : Dict ='''3'''
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
| 631 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__( self , degree , coefficients ) -> None:
        '''simple docstring'''
        if len(coefficients ) != degree + 1:
            raise ValueError(
                """The number of coefficients should be equal to the degree + 1.""" )
        self.coefficients = list(coefficients )
        self.degree = degree
def __add__( self , lowerCamelCase__ ) -> Polynomial:
'''simple docstring'''
if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree , lowerCamelCase__ )
else:
            coefficients = polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree , lowerCamelCase__ )
def __sub__( self , lowerCamelCase__ ) -> Polynomial:
'''simple docstring'''
return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self ) -> Polynomial:
'''simple docstring'''
return Polynomial(self.degree , [-c for c in self.coefficients] )
def __mul__( self , lowerCamelCase__ ) -> Polynomial:
'''simple docstring'''
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree , lowerCamelCase__ )
    def evaluate( self , substitution ) -> int | float:
        '''simple docstring'''
        result = 0
        for i in range(self.degree + 1 ):
            result += self.coefficients[i] * (substitution**i)
        return result
def __str__( self ) -> str:
'''simple docstring'''
lowercase__ = """"""
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(lowerCamelCase__ )
return polynomial
def __repr__( self ) -> str:
'''simple docstring'''
return self.__str__()
    def derivative( self ) -> Polynomial:
        '''simple docstring'''
        coefficients = [0] * self.degree
        for i in range(self.degree ):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1 , coefficients )
    def integral( self , constant = 0 ) -> Polynomial:
        '''simple docstring'''
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1 ):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1 , coefficients )
def __eq__( self , lowerCamelCase__ ) -> bool:
'''simple docstring'''
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self , lowerCamelCase__ ) -> bool:
'''simple docstring'''
return not self.__eq__(lowerCamelCase__ )
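# Illustrative checks (not in the original file); coefficients are stored lowest degree first.
if __name__ == "__main__":
    p = Polynomial(1, [1, 2])  # 1 + 2x
    q = Polynomial(1, [3, 4])  # 3 + 4x
    assert p + q == Polynomial(1, [4, 6])
    assert p * q == Polynomial(2, [3, 10, 8])  # (1 + 2x)(3 + 4x) = 3 + 10x + 8x^2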
| 325 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__A = 16
__A = 32
def get_dataloaders( accelerator , batch_size = 16 ):
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    datasets = load_dataset("""glue""" , """mrpc""" )

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="""longest""" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="""pt""" , )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function( config , args ):
    # For testing only
    if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , None ) == "1":
        config["""num_epochs"""] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )
    metric = evaluate.load("""glue""" , """mrpc""" )

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size )
    def inner_training_loop(batch_size ):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=True )
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device )
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters() , lr=lr )
        train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size )
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) , )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
        # Now we train the model
        for epoch in range(num_epochs ):
            model.train()
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                outputs = model(**batch )
                loss = outputs.loss
                accelerator.backward(loss )
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    outputs = model(**batch )
                predictions = outputs.logits.argmax(dim=-1 )
                predictions, references = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
                metric.add_batch(
                    predictions=predictions , references=references , )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f'''epoch {epoch}:''' , eval_metric )

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""" , type=str , default=None , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""" , )
    parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
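# Illustrative launch commands (not part of the original file; the script name is an assumption):
# accelerate launch memory.py                          # device setup taken from the accelerate config
# accelerate launch memory.py --mixed_precision fp16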
| 325 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"openai/imagegpt-small": "",
"openai/imagegpt-medium": "",
"openai/imagegpt-large": "",
}
class ImageGPTConfig( PretrainedConfig ):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
    def __init__(self , vocab_size=512 + 1 , n_positions=32 * 32 , n_embd=512 , n_layer=24 , n_head=8 , n_inner=None , activation_function="quick_gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , tie_word_embeddings=False , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings , **kwargs )
class ImageGPTOnnxConfig( OnnxConfig ):
@property
    def inputs(self ):
'''simple docstring'''
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
] )
    def generate_dummy_inputs(self , preprocessor , batch_size = 1 , seq_length = -1 , is_pair = False , framework = None , num_channels = 3 , image_width = 32 , image_height = 32 , ):
        '''simple docstring'''
        input_image = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
        inputs = dict(preprocessor(images=input_image , return_tensors=framework ) )
return inputs
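# Background note (illustrative, not part of the original file): vocab_size defaults to
# 512 + 1 because ImageGPT quantizes pixels into 512 color clusters plus one
# start-of-sequence token.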
| 63 |
"""simple docstring"""
def __magic_name__ ( _lowerCamelCase : list[int] ):
if not nums: # Makes sure that the list is not empty
raise ValueError("""List is empty""" )
__a : Any = sum(_lowerCamelCase ) / len(_lowerCamelCase ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 | 1 |
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
__snake_case = {
"""n_samples""": 64,
"""horizon""": 32,
"""num_inference_steps""": 20,
"""n_guide_steps""": 2, # can set to 0 for faster sampling, does not use value network
"""scale_grad_by_std""": True,
"""scale""": 0.1,
"""eta""": 0.0,
"""t_grad_cutoff""": 2,
"""device""": """cpu""",
}
if __name__ == "__main__":
__snake_case = """hopper-medium-v2"""
__snake_case = gym.make(env_name)
__snake_case = ValueGuidedRLPipeline.from_pretrained(
"""bglick13/hopper-medium-v2-value-function-hor32""",
env=env,
)
env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 10_00
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)
            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)
            # update return
            total_reward += reward
            total_score += score
            print(
                F"""Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"""
                F""" {total_score}"""
            )
            # save observations for rendering
            rollout.append(next_observation.copy())
            obs = next_observation
except KeyboardInterrupt:
pass
print(F"""Total reward: {total_reward}""")
| 472 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_blenderbot""": [
"""BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotConfig""",
"""BlenderbotOnnxConfig""",
],
"""tokenization_blenderbot""": ["""BlenderbotTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""BlenderbotTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_blenderbot"""] = [
"""BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotForCausalLM""",
"""BlenderbotForConditionalGeneration""",
"""BlenderbotModel""",
"""BlenderbotPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_blenderbot"""] = [
"""TFBlenderbotForConditionalGeneration""",
"""TFBlenderbotModel""",
"""TFBlenderbotPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_blenderbot"""] = [
"""FlaxBlenderbotForConditionalGeneration""",
"""FlaxBlenderbotModel""",
"""FlaxBlenderbotPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 472 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_llama'] = ['LlamaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_llama_fast'] = ['LlamaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_llama'] = [
'LlamaForCausalLM',
'LlamaModel',
'LlamaPreTrainedModel',
'LlamaForSequenceClassification',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 344 | 0 |
'''simple docstring'''
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
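# Illustrative sketch (added for clarity, not part of the upstream test file):
# the default head masks built above are all-ones tensors of shape
# (num_layers, num_heads), i.e. "keep every attention head".
def _illustrative_default_head_mask(num_layers: int = 2, num_heads: int = 4):
    mask = torch.ones(num_layers, num_heads)
    assert mask.shape == (num_layers, num_heads)
    return mask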
class MaMaaaModelTester:
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="relu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def get_config(self):
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = MaMaaaModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))
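    # Illustrative note (added, not upstream code; names below are hypothetical):
    # the check above relies on the KV-cache invariant that a full forward pass
    # and an incremental pass with `past_key_values` agree on the new tokens'
    # hidden states, e.g.:
    #
    #   full = model(torch.cat([ids, new_ids], dim=-1))["last_hidden_state"][:, -3:]
    #   step = model(new_ids, past_key_values=past)["last_hidden_state"]
    #   torch.allclose(full[..., idx], step[..., idx], atol=1e-2)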
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = MaMaaaModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]

        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class MaMaaaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": MaMaaaForConditionalGeneration,
            "feature-extraction": MaMaaaModel,
            "summarization": MaMaaaForConditionalGeneration,
            "text2text-generation": MaMaaaForConditionalGeneration,
            "translation": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False
    def setUp(self):
        self.model_tester = MaMaaaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaMaaaConfig)
    def test_config(self):
self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model_2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])
    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]
    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = MaMaaaForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def _long_tensor(tok_lst):
    """simple docstring"""
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests(unittest.TestCase):
    '''simple docstring'''

    @cached_property
    def default_tokenizer(self):
        return MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""")

    def test_inference_no_head(self):
        model = MaMaaaModel.from_pretrained("""facebook/m2m100_418M""").to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""").to(torch_device)

        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""").to(torch_device)
        tokenizer = MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""", src_lang="""fr""", tgt_lang="""en""")

        src_fr = [
            """L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
            """Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
            """Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"""
            """ Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"""
            """ l'ampleur de la surveillance américaine sur l'ensemble des communications en France.""",
        ]

        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="""pt""")

        hypotheses_batch = model.generate(
            input_ids=dct["""input_ids"""].to(torch_device),
            attention_mask=dct["""attention_mask"""].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id("""en"""),
        )

        expected_en = [
            """The NSA case highlights the total absence of intelligence debate""",
            """I think there are two levels of response from the French government.""",
            """When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."""
            """ Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"""
            """ communications in France.""",
        ]

        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
        assert generated == expected_en
| 344 | 1 |
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
return None, e
_add_items = (
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
)
_overwrite_items = [
_set('key_a', 'val_a'),
_set('key_a', 'val_b'),
]
_delete_items = [
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
_del('key_a'),
_del('key_b'),
_set('key_a', 'val_a'),
_del('key_a'),
]
_access_absent_items = [
_get('key_a'),
_del('key_a'),
_set('key_a', 'val_a'),
_del('key_a'),
_del('key_a'),
_get('key_a'),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('key_a', 'val_b'),
]
@pytest.mark.parametrize(
'operations', (
pytest.param(_add_items, id='add items' ),
pytest.param(_overwrite_items, id='overwrite items' ),
pytest.param(_delete_items, id='delete items' ),
pytest.param(_access_absent_items, id='access absent items' ),
pytest.param(_add_with_resize_up, id='add with resize up' ),
pytest.param(_add_with_resize_down, id='add with resize down' ),
), )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith('_')

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
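# Illustrative sketch (added, not part of the test suite): basic HashMap usage
# covering the dict protocol exercised above (setitem, getitem, delitem, len).
def _illustrative_hash_map_usage():
    hm = HashMap(initial_block_size=4)
    hm['key_a'] = 'val_a'
    assert hm['key_a'] == 'val_a'
    del hm['key_a']
    assert len(hm) == 0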
| 365 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/config.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/config.json'
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'fnet'

    def __init__( self , vocab_size=32_000 , hidden_size=768 , num_hidden_layers=12 , intermediate_size=3_072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=4 , initializer_range=0.0_2 , layer_norm_eps=1E-12 , use_tpu_fourier_optimizations=False , tpu_short_seq_length=512 , pad_token_id=3 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
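# Illustrative sketch (added): instantiating the config with one override,
# mirroring standard `PretrainedConfig` usage.
def _illustrative_fnet_config():
    config = FNetConfig(hidden_size=256)
    assert config.hidden_size == 256 and config.model_type == 'fnet'
    return config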
| 365 | 1 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
A__ = BatchSampler(range(24 ),batch_size=3,drop_last=__lowerCamelCase )
A__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__lowerCamelCase,__lowerCamelCase )
A__ = BatchSampler(range(24 ),batch_size=3,drop_last=__lowerCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCamelCase,__lowerCamelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
A__ = BatchSampler(range(21 ),batch_size=3,drop_last=__lowerCamelCase )
A__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(__lowerCamelCase,__lowerCamelCase )
A__ = BatchSampler(range(21 ),batch_size=3,drop_last=__lowerCamelCase )
A__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCamelCase,__lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
A__ = BatchSampler(range(22 ),batch_size=3,drop_last=__lowerCamelCase )
A__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(__lowerCamelCase,__lowerCamelCase )
A__ = BatchSampler(range(22 ),batch_size=3,drop_last=__lowerCamelCase )
A__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCamelCase,__lowerCamelCase )
        # Check the shards when the dataset is not a round multiple of batch size and does not have a multiple of
        # num_processes batch.
A__ = BatchSampler(range(20 ),batch_size=3,drop_last=__lowerCamelCase )
A__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(__lowerCamelCase,__lowerCamelCase )
A__ = BatchSampler(range(20 ),batch_size=3,drop_last=__lowerCamelCase )
A__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCamelCase,__lowerCamelCase )
# Check the shards when the dataset is very small.
A__ = BatchSampler(range(2 ),batch_size=3,drop_last=__lowerCamelCase )
A__ = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(__lowerCamelCase,__lowerCamelCase )
A__ = BatchSampler(range(2 ),batch_size=3,drop_last=__lowerCamelCase )
A__ = [[], []]
self.check_batch_sampler_shards(__lowerCamelCase,__lowerCamelCase )
    def test_batch_sampler_shards_with_splits(self):
A__ = BatchSampler(range(24 ),batch_size=4,drop_last=__lowerCamelCase )
A__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__lowerCamelCase,__lowerCamelCase,split_batches=__lowerCamelCase )
A__ = BatchSampler(range(24 ),batch_size=4,drop_last=__lowerCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCamelCase,__lowerCamelCase,split_batches=__lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size.
A__ = BatchSampler(range(22 ),batch_size=4,drop_last=__lowerCamelCase )
A__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(__lowerCamelCase,__lowerCamelCase,split_batches=__lowerCamelCase )
A__ = BatchSampler(range(22 ),batch_size=4,drop_last=__lowerCamelCase )
A__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCamelCase,__lowerCamelCase,split_batches=__lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
A__ = BatchSampler(range(21 ),batch_size=4,drop_last=__lowerCamelCase )
A__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(__lowerCamelCase,__lowerCamelCase,split_batches=__lowerCamelCase )
A__ = BatchSampler(range(21 ),batch_size=4,drop_last=__lowerCamelCase )
A__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCamelCase,__lowerCamelCase,split_batches=__lowerCamelCase )
# Check the shards when the dataset is very small.
A__ = BatchSampler(range(2 ),batch_size=4,drop_last=__lowerCamelCase )
A__ = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(__lowerCamelCase,__lowerCamelCase,split_batches=__lowerCamelCase )
A__ = BatchSampler(range(2 ),batch_size=4,drop_last=__lowerCamelCase )
A__ = [[], []]
self.check_batch_sampler_shards(__lowerCamelCase,__lowerCamelCase,split_batches=__lowerCamelCase )
    def test_batch_sampler_shards_with_no_splits_no_even(self):
A__ = BatchSampler(range(24 ),batch_size=3,drop_last=__lowerCamelCase )
A__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__lowerCamelCase,__lowerCamelCase,even_batches=__lowerCamelCase )
A__ = BatchSampler(range(24 ),batch_size=3,drop_last=__lowerCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCamelCase,__lowerCamelCase,even_batches=__lowerCamelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
A__ = BatchSampler(range(21 ),batch_size=3,drop_last=__lowerCamelCase )
A__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCamelCase,__lowerCamelCase,even_batches=__lowerCamelCase )
A__ = BatchSampler(range(21 ),batch_size=3,drop_last=__lowerCamelCase )
A__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCamelCase,__lowerCamelCase,even_batches=__lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
A__ = BatchSampler(range(22 ),batch_size=3,drop_last=__lowerCamelCase )
A__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(__lowerCamelCase,__lowerCamelCase,even_batches=__lowerCamelCase )
A__ = BatchSampler(range(22 ),batch_size=3,drop_last=__lowerCamelCase )
A__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCamelCase,__lowerCamelCase,even_batches=__lowerCamelCase )
        # Check the shards when the dataset is not a round multiple of batch size and does not have a multiple of
        # num_processes batch.
A__ = BatchSampler(range(20 ),batch_size=3,drop_last=__lowerCamelCase )
A__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCamelCase,__lowerCamelCase,even_batches=__lowerCamelCase )
A__ = BatchSampler(range(20 ),batch_size=3,drop_last=__lowerCamelCase )
A__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCamelCase,__lowerCamelCase,even_batches=__lowerCamelCase )
# Check the shards when the dataset is very small.
A__ = BatchSampler(range(2 ),batch_size=3,drop_last=__lowerCamelCase )
A__ = [[[0, 1]], []]
self.check_batch_sampler_shards(__lowerCamelCase,__lowerCamelCase,even_batches=__lowerCamelCase )
A__ = BatchSampler(range(2 ),batch_size=3,drop_last=__lowerCamelCase )
A__ = [[], []]
self.check_batch_sampler_shards(__lowerCamelCase,__lowerCamelCase,even_batches=__lowerCamelCase )
    def test_batch_sampler_shards_with_splits_no_even(self):
A__ = BatchSampler(range(24 ),batch_size=4,drop_last=__lowerCamelCase )
A__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__lowerCamelCase,__lowerCamelCase,split_batches=__lowerCamelCase,even_batches=__lowerCamelCase )
A__ = BatchSampler(range(24 ),batch_size=4,drop_last=__lowerCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCamelCase,__lowerCamelCase,split_batches=__lowerCamelCase,even_batches=__lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size.
A__ = BatchSampler(range(22 ),batch_size=4,drop_last=__lowerCamelCase )
A__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCamelCase,__lowerCamelCase,split_batches=__lowerCamelCase,even_batches=__lowerCamelCase )
A__ = BatchSampler(range(22 ),batch_size=4,drop_last=__lowerCamelCase )
A__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCamelCase,__lowerCamelCase,split_batches=__lowerCamelCase,even_batches=__lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
A__ = BatchSampler(range(21 ),batch_size=4,drop_last=__lowerCamelCase )
A__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCamelCase,__lowerCamelCase,split_batches=__lowerCamelCase,even_batches=__lowerCamelCase )
A__ = BatchSampler(range(21 ),batch_size=4,drop_last=__lowerCamelCase )
A__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCamelCase,__lowerCamelCase,split_batches=__lowerCamelCase,even_batches=__lowerCamelCase )
# Check the shards when the dataset is very small.
A__ = BatchSampler(range(2 ),batch_size=4,drop_last=__lowerCamelCase )
A__ = [[[0, 1]], []]
self.check_batch_sampler_shards(__lowerCamelCase,__lowerCamelCase,split_batches=__lowerCamelCase,even_batches=__lowerCamelCase )
A__ = BatchSampler(range(2 ),batch_size=4,drop_last=__lowerCamelCase )
A__ = [[], []]
self.check_batch_sampler_shards(__lowerCamelCase,__lowerCamelCase,split_batches=__lowerCamelCase,even_batches=__lowerCamelCase )
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]
        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
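# Illustrative sketch (added, not part of the test suite): sharding a simple
# batch sampler across 2 processes; shard 0 takes even-indexed batches and
# shard 1 takes odd-indexed ones, matching the expectations asserted above.
def _illustrative_shard_demo():
    sampler = BatchSampler(range(12), batch_size=3, drop_last=False)
    shards = [BatchSamplerShard(sampler, 2, rank) for rank in range(2)]
    return [list(shard) for shard in shards]
    # -> [[[0, 1, 2], [6, 7, 8]], [[3, 4, 5], [9, 10, 11]]]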
| 702 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__: Any = logging.get_logger(__name__)
a__: List[str] = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = '''trocr'''
__SCREAMING_SNAKE_CASE = ['''past_key_values''']
__SCREAMING_SNAKE_CASE = {
'''num_attention_heads''': '''decoder_attention_heads''',
'''hidden_size''': '''d_model''',
'''num_hidden_layers''': '''decoder_layers''',
}
def __init__( self,__lowerCamelCase=5_0265,__lowerCamelCase=1024,__lowerCamelCase=12,__lowerCamelCase=16,__lowerCamelCase=4096,__lowerCamelCase="gelu",__lowerCamelCase=512,__lowerCamelCase=0.1,__lowerCamelCase=0.0,__lowerCamelCase=0.0,__lowerCamelCase=2,__lowerCamelCase=0.02,__lowerCamelCase=0.0,__lowerCamelCase=True,__lowerCamelCase=False,__lowerCamelCase=True,__lowerCamelCase=True,__lowerCamelCase=1,__lowerCamelCase=0,__lowerCamelCase=2,**__lowerCamelCase,):
A__ = vocab_size
A__ = d_model
A__ = decoder_layers
A__ = decoder_attention_heads
A__ = decoder_ffn_dim
A__ = activation_function
A__ = max_position_embeddings
A__ = dropout
A__ = attention_dropout
A__ = activation_dropout
A__ = init_std
A__ = decoder_layerdrop
A__ = use_cache
A__ = scale_embedding
A__ = use_learned_position_embeddings
A__ = layernorm_embedding
super().__init__(
pad_token_id=__lowerCamelCase,bos_token_id=__lowerCamelCase,eos_token_id=__lowerCamelCase,decoder_start_token_id=__lowerCamelCase,**__lowerCamelCase,)
| 212 | 0 |
import os
def solution() -> str:
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
| 40 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task='zero-shot-audio-classification', model='hf-internal-testing/tiny-clap-htsat-unfused'
        )
        dataset = load_dataset('ashraq/esc50')
        audio = dataset['train']['audio'][-1]['array']
        output = audio_classifier(audio, candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'])
        self.assertEqual(
            nested_simplify(output),
            [{'score': 0.501, 'label': 'Sound of a dog'}, {'score': 0.499, 'label': 'Sound of vaccum cleaner'}],
        )

    @unittest.skip('No models are available in TF')
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task='zero-shot-audio-classification',
            model='laion/clap-htsat-unfused',
        )
        # This is an audio of a dog
        dataset = load_dataset('ashraq/esc50')
        audio = dataset['train']['audio'][-1]['array']
        output = audio_classifier(audio, candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'])
        self.assertEqual(
            nested_simplify(output),
            [
                {'score': 0.999, 'label': 'Sound of a dog'},
                {'score': 0.001, 'label': 'Sound of vaccum cleaner'},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {'score': 0.999, 'label': 'Sound of a dog'},
                    {'score': 0.001, 'label': 'Sound of vaccum cleaner'},
                ],
            ]
            * 5,
        )
        output = audio_classifier(
            [audio] * 5, candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {'score': 0.999, 'label': 'Sound of a dog'},
                    {'score': 0.001, 'label': 'Sound of vaccum cleaner'},
                ],
            ]
            * 5,
        )

    @unittest.skip('No models are available in TF')
    def test_large_model_tf(self):
        pass
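# Illustrative sketch (added, not part of the test file): the same pipeline used
# standalone, outside unittest.
def _illustrative_zero_shot_audio():
    classifier = pipeline(task='zero-shot-audio-classification', model='laion/clap-htsat-unfused')
    dataset = load_dataset('ashraq/esc50')
    audio = dataset['train']['audio'][-1]['array']
    return classifier(audio, candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'])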
| 549 | 0 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def A_ ( _lowercase, _lowercase=0.999, _lowercase="cosine", ):
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(_lowercase ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_lowercase ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
snake_case_ :Dict = []
for i in range(_lowercase ):
snake_case_ :Union[str, Any] = i / num_diffusion_timesteps
snake_case_ :int = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_lowercase ) / alpha_bar_fn(_lowercase ), _lowercase ) )
return torch.tensor(_lowercase, dtype=torch.floataa )
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    '''simple docstring'''

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="""cosine""")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="""exp""")
        else:
            raise NotImplementedError(f"""{beta_schedule} is not implemented for {self.__class__}""")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor]) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None, num_train_timesteps: Optional[int] = None):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""")

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("""mps"""):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t
    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas
    @property
    def state_in_first_order(self):
        return self.dt is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""")

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range)

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
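# Illustrative sketch (added, not library code; a minimal loop assuming the
# standard diffusers Heun scheduler API): exercising the scheduler above with a
# dummy epsilon prediction. Each inference timestep appears twice in
# `scheduler.timesteps` (once per Heun stage), except the first one.
def _illustrative_heun_loop():
    scheduler = HeunDiscreteScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(5)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_output = torch.randn_like(sample)  # stand-in for a UNet noise prediction
        sample = scheduler.step(model_output, t, sample).prev_sample
    return sample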
| 717 |
"""simple docstring"""
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__a = logging.get_logger(__name__) # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    '''simple docstring'''

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    '''simple docstring'''

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: TransformeraDModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__( self , vqvae: VQModel , text_encoder: CLIPTextModel , tokenizer: CLIPTokenizer , transformer: TransformeraDModel , scheduler: VQDiffusionScheduler , learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings , ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae, transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="""max_length""", max_length=self.tokenizer.model_max_length, return_tensors="""pt""",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                """The following part of your input was truncated because CLIP can only handle sequences up to"""
                f""" {self.tokenizer.model_max_length} tokens: {removed_text}""")
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""""""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens, padding="""max_length""", max_length=max_length, truncation=True, return_tensors="""pt""",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
@torch.no_grad()
def __call__( self: Dict , snake_case: Union[str, List[str]] , snake_case: int = 100 , snake_case: float = 5.0 , snake_case: float = 1.0 , snake_case: int = 1 , snake_case: Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case: Optional[torch.FloatTensor] = None , snake_case: Optional[str] = "pil" , snake_case: bool = True , snake_case: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , snake_case: int = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
if isinstance(snake_case , snake_case ):
snake_case_ :Any = 1
elif isinstance(snake_case , snake_case ):
snake_case_ :int = len(snake_case )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(snake_case )}""" )
snake_case_ :Tuple = batch_size * num_images_per_prompt
snake_case_ :Optional[Any] = guidance_scale > 1.0
snake_case_ :Dict = self._encode_prompt(snake_case , snake_case , snake_case )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(snake_case , snake_case ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(snake_case )}.""" )
# get the initial completely masked latents unless the user supplied it
snake_case_ :List[str] = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
snake_case_ :Tuple = self.transformer.num_vector_embeds - 1
snake_case_ :Optional[int] = torch.full(snake_case , snake_case ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"""Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"""
f""" {self.transformer.num_vector_embeds - 1} (inclusive).""" )
snake_case_ :str = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(snake_case , device=self.device )
snake_case_ :Optional[Any] = self.scheduler.timesteps.to(self.device )
snake_case_ :List[Any] = latents
for i, t in enumerate(self.progress_bar(snake_case ) ):
# expand the sample if we are doing classifier free guidance
snake_case_ :List[Any] = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
snake_case_ :Any = self.transformer(snake_case , encoder_hidden_states=snake_case , timestep=snake_case ).sample
if do_classifier_free_guidance:
snake_case_, snake_case_ :Optional[Any] = model_output.chunk(2 )
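# classifier-free guidance: start from the unconditional prediction and step
# `guidance_scale` times along the direction toward the text-conditioned one;
# a scale > 1 strengthens prompt adherence at some cost to sample diversity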
snake_case_ :Any = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(snake_case , dim=1 , keepdim=snake_case )
snake_case_ :str = self.truncate(snake_case , snake_case )
# remove `log(0)`'s (`-inf`s)
snake_case_ :List[str] = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
snake_case_ :Any = self.scheduler.step(snake_case , timestep=snake_case , sample=snake_case , generator=snake_case ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(snake_case , snake_case , snake_case )
snake_case_ :Optional[int] = self.vqvae.config.vq_embed_dim
snake_case_ :Tuple = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
snake_case_ :List[Any] = self.vqvae.quantize.get_codebook_entry(snake_case , shape=snake_case )
snake_case_ :Dict = self.vqvae.decode(snake_case , force_not_quantize=snake_case ).sample
snake_case_ :List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
snake_case_ :Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
snake_case_ :Any = self.numpy_to_pil(snake_case )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case )
def lowerCAmelCase_ ( self: int , snake_case: torch.FloatTensor , snake_case: float ) -> torch.FloatTensor:
snake_case_, snake_case_ :List[Any] = torch.sort(snake_case , 1 , descending=snake_case )
snake_case_ :Optional[int] = torch.exp(snake_case )
snake_case_ :int = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
snake_case_ :Union[str, Any] = torch.full_like(keep_mask[:, 0:1, :] , snake_case )
snake_case_ :List[str] = torch.cat((all_true, keep_mask) , dim=1 )
snake_case_ :List[str] = keep_mask[:, :-1, :]
snake_case_ :str = keep_mask.gather(1 , indices.argsort(1 ) )
snake_case_ :int = log_p_x_0.clone()
snake_case_ :List[Any] = -torch.inf # -inf = log(0)
return rv
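# Standalone sketch (illustrative, not part of the pipeline above) of the truncation
# performed in `truncate`: keep the smallest set of classes whose cumulative
# probability reaches `truncation_rate`, and send every other class to log(0).
# The (batch, num_classes, seq) shape is an assumption matching `log_p_x_0` above.
import torch

def truncate_log_probs_sketch(log_p: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
    sorted_log_p, indices = torch.sort(log_p, 1, descending=True)
    sorted_p = torch.exp(sorted_log_p)
    keep_mask = sorted_p.cumsum(dim=1) < truncation_rate
    # always keep the most likely class, then shift the mask by one so the class
    # that first crosses the threshold is also kept
    all_true = torch.full_like(keep_mask[:, 0:1, :], True)
    keep_mask = torch.cat((all_true, keep_mask), dim=1)[:, :-1, :]
    # undo the sort so the mask lines up with the original class order
    keep_mask = keep_mask.gather(1, indices.argsort(1))
    out = log_p.clone()
    out[~keep_mask] = -torch.inf  # -inf = log(0)
    return out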
| 310 | 0 |
def sum_of_divisors(input_num: int) -> int:
    """Return the sum of the proper divisors of ``input_num``.

    >>> sum_of_divisors(6)
    6
    >>> sum_of_divisors(12)
    16
    """
    if not isinstance(input_num, int):
        raise ValueError('''Input must be an integer''')
    if input_num <= 0:
        raise ValueError('''Input must be positive''')
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0)
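# Illustrative follow-up (not in the original snippet): numbers equal to the sum
# of their proper divisors are "perfect".
def is_perfect(number: int) -> bool:
    return sum_of_divisors(number) == number  # is_perfect(6) and is_perfect(28) are True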
if __name__ == "__main__":
import doctest
doctest.testmod()
| 598 |
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class a_ ( unittest.TestCase ):
def __init__( self : Any , UpperCAmelCase__ : Optional[Any] ):
"""simple docstring"""
snake_case : Union[str, Any] = parent
def lowerCAmelCase( self : Optional[int] ):
"""simple docstring"""
return {}
def a_ ( ) -> int:
"""simple docstring"""
snake_case : Any = '''<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>'''
snake_case : Tuple = '''
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
'''
return [html_string_a, html_string_a]
@require_bsa
class a_ ( a , unittest.TestCase ):
A__ : List[Any] = MarkupLMFeatureExtractor if is_bsa_available() else None
def lowerCAmelCase( self : Dict ):
"""simple docstring"""
snake_case : int = MarkupLMFeatureExtractionTester(self )
@property
def lowerCAmelCase( self : Tuple ):
"""simple docstring"""
return self.feature_extract_tester.prepare_feat_extract_dict()
def lowerCAmelCase( self : Optional[Any] ):
"""simple docstring"""
# Initialize feature_extractor
snake_case : List[Any] = self.feature_extraction_class()
# Test not batched input
snake_case : List[str] = get_html_strings()[0]
snake_case : Any = feature_extractor(UpperCAmelCase__ )
# fmt: off
snake_case : List[Any] = [['''sample document''', '''Goog''', '''This is one header''', '''This is a another Header''', '''Travel from''', '''SFO to JFK''', '''on May 2, 2015 at 2:00 pm. For details go to confirm.com''', '''Traveler''', '''name''', '''is''', '''John Doe''']]
snake_case : List[str] = [['''/html/head/title''', '''/html/body/a''', '''/html/body/h1''', '''/html/body/h2''', '''/html/body/p''', '''/html/body/p/p/b[1]''', '''/html/body/p/p/b[2]/i''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/b''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/p''']]
# fmt: on
self.assertEqual(encoding.nodes , UpperCAmelCase__ )
self.assertEqual(encoding.xpaths , UpperCAmelCase__ )
# Test batched
snake_case : List[str] = get_html_strings()
snake_case : Optional[Any] = feature_extractor(UpperCAmelCase__ )
# fmt: off
snake_case : List[Any] = expected_nodes + [['''My First Heading''', '''My first paragraph.''']]
snake_case : Union[str, Any] = expected_xpaths + [['''/html/body/h1''', '''/html/body/p''']]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , UpperCAmelCase__ )
self.assertEqual(encoding.xpaths , UpperCAmelCase__ )
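# Illustrative note (not part of this test): in practice the feature extractor is
# usually wrapped together with a tokenizer in a MarkupLMProcessor, roughly:
#   from transformers import MarkupLMProcessor
#   processor = MarkupLMProcessor.from_pretrained("microsoft/markuplm-base")
#   encoding = processor("<html><body><h1>Hello</h1></body></html>", return_tensors="pt")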
| 598 | 1 |
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase ( a__ ):
def __init__( self ,_A ,_A=13 ,_A=7 ,_A=True ,_A=True ,_A=True ,_A=True ,_A=99 ,_A=32 ,_A=5 ,_A=4 ,_A=37 ,_A="gelu" ,_A=0.1 ,_A=0.1 ,_A=512 ,_A=16 ,_A=2 ,_A=0.0_2 ,_A=False ,_A=True ,_A="None" ,_A=3 ,_A=4 ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = parent
_lowerCAmelCase : Dict = batch_size
_lowerCAmelCase : List[str] = seq_length
_lowerCAmelCase : List[Any] = is_training
_lowerCAmelCase : Optional[Any] = use_input_mask
_lowerCAmelCase : str = use_token_type_ids
_lowerCAmelCase : str = use_labels
_lowerCAmelCase : Dict = vocab_size
_lowerCAmelCase : Optional[Any] = hidden_size
_lowerCAmelCase : List[str] = num_hidden_layers
_lowerCAmelCase : Union[str, Any] = num_attention_heads
_lowerCAmelCase : Dict = intermediate_size
_lowerCAmelCase : Optional[Any] = hidden_act
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : List[str] = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : List[str] = type_vocab_size
_lowerCAmelCase : Optional[Any] = type_sequence_label_size
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : Dict = num_labels
_lowerCAmelCase : Union[str, Any] = num_choices
_lowerCAmelCase : Optional[Any] = relative_attention
_lowerCAmelCase : Tuple = position_biased_input
_lowerCAmelCase : Any = pos_att_type
_lowerCAmelCase : List[Any] = scope
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_lowerCAmelCase : Optional[Any] = None
if self.use_input_mask:
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
_lowerCAmelCase : Optional[int] = None
if self.use_token_type_ids:
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Tuple = None
_lowerCAmelCase : Tuple = None
if self.use_labels:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] ,self.num_choices )
_lowerCAmelCase : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return DebertaVaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,relative_attention=self.relative_attention ,position_biased_input=self.position_biased_input ,pos_att_type=self.pos_att_type ,)
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
self.parent.assertListEqual(list(result.loss.size() ) ,[] )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = DebertaVaModel(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Optional[Any] = model(_A ,attention_mask=_A ,token_type_ids=_A )[0]
_lowerCAmelCase : Tuple = model(_A ,token_type_ids=_A )[0]
_lowerCAmelCase : Union[str, Any] = model(_A )[0]
self.parent.assertListEqual(list(sequence_output.size() ) ,[self.batch_size, self.seq_length, self.hidden_size] )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = DebertaVaForMaskedLM(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Union[str, Any] = model(_A ,attention_mask=_A ,token_type_ids=_A ,labels=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = self.num_labels
_lowerCAmelCase : List[Any] = DebertaVaForSequenceClassification(_A )
model.to(_A )
model.eval()
_lowerCAmelCase : List[Any] = model(_A ,attention_mask=_A ,token_type_ids=_A ,labels=_A )
self.parent.assertListEqual(list(result.logits.size() ) ,[self.batch_size, self.num_labels] )
self.check_loss_output(_A )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = self.num_labels
_lowerCAmelCase : Dict = DebertaVaForTokenClassification(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Union[str, Any] = model(_A ,attention_mask=_A ,token_type_ids=_A ,labels=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[str] = DebertaVaForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : str = model(
_A ,attention_mask=_A ,token_type_ids=_A ,start_positions=_A ,end_positions=_A ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = DebertaVaForMultipleChoice(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : List[Any] = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
_lowerCAmelCase : int = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
_lowerCAmelCase : str = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
_lowerCAmelCase : Dict = model(
_A ,attention_mask=_A ,token_type_ids=_A ,labels=_A ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def __lowerCamelCase ( self ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
) = config_and_inputs
inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
_UpperCAmelCase = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCAmelCase = (
{
"feature-extraction": DebertaVaModel,
"fill-mask": DebertaVaForMaskedLM,
"question-answering": DebertaVaForQuestionAnswering,
"text-classification": DebertaVaForSequenceClassification,
"token-classification": DebertaVaForTokenClassification,
"zero-shot": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = DebertaVaModelTester(self )
_lowerCAmelCase : List[str] = ConfigTester(self ,config_class=_A ,hidden_size=37 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Union[str, Any] = DebertaVaModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
_lowerCAmelCase : Any = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
_lowerCAmelCase : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_lowerCAmelCase : Tuple = model(_A ,attention_mask=_A )[0]
# compare the actual values for a slice.
_lowerCAmelCase : Optional[Any] = torch.tensor(
[[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,_A ,atol=1E-4 ) ,F"""{output[:, 1:4, 1:4]}""" )
| 713 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = DanceDiffusionPipeline
_UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"callback",
"latents",
"callback_steps",
"output_type",
"num_images_per_prompt",
}
_UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = UNetaDModel(
block_out_channels=(32, 32, 64) ,extra_in_channels=16 ,sample_size=512 ,sample_rate=1_6000 ,in_channels=2 ,out_channels=2 ,flip_sin_to_cos=_A ,use_timestep_embedding=_A ,time_embedding_type='fourier' ,mid_block_type='UNetMidBlock1D' ,down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') ,up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') ,)
_lowerCAmelCase : int = IPNDMScheduler()
_lowerCAmelCase : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
if str(_A ).startswith('mps' ):
_lowerCAmelCase : str = torch.manual_seed(_A )
else:
_lowerCAmelCase : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A )
_lowerCAmelCase : int = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Optional[Any] = DanceDiffusionPipeline(**_A )
_lowerCAmelCase : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(_A )
_lowerCAmelCase : List[str] = pipe(**_A )
_lowerCAmelCase : List[Any] = output.audios
_lowerCAmelCase : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
_lowerCAmelCase : Optional[Any] = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def __lowerCamelCase ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = torch_device
_lowerCAmelCase : int = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
_lowerCAmelCase : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Optional[int] = torch.manual_seed(0 )
_lowerCAmelCase : str = pipe(generator=_A ,num_inference_steps=100 ,audio_length_in_s=4.0_9_6 )
_lowerCAmelCase : str = output.audios
_lowerCAmelCase : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowerCAmelCase : Union[str, Any] = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = torch_device
_lowerCAmelCase : Tuple = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' ,torch_dtype=torch.floataa )
_lowerCAmelCase : Optional[int] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = pipe(generator=_A ,num_inference_steps=100 ,audio_length_in_s=4.0_9_6 )
_lowerCAmelCase : Union[str, Any] = output.audios
_lowerCAmelCase : int = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowerCAmelCase : List[str] = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
| 16 | 0 |
"""simple docstring"""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    '''simple docstring'''
    img_url = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'''
    raw_image = Image.open(requests.get(img_url, stream=True ).raw ).convert('''RGB''' )
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC ),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711) ),
        ] )
    image = transform(raw_image ).unsqueeze(0 ).to(device )
    return image
def rename_key(key):
    '''simple docstring'''
    if "visual_encoder" in key:
        key = re.sub('''visual_encoder*''', '''vision_model.encoder''', key )
    if "blocks" in key:
        key = re.sub(R'''blocks''', '''layers''', key )
    if "attn" in key:
        key = re.sub(R'''attn''', '''self_attn''', key )
    if "norm1" in key:
        key = re.sub(R'''norm1''', '''layer_norm1''', key )
    if "norm2" in key:
        key = re.sub(R'''norm2''', '''layer_norm2''', key )
    if "encoder.norm" in key:
        key = re.sub(R'''encoder.norm''', '''post_layernorm''', key )
    if "encoder.patch_embed.proj" in key:
        key = re.sub(R'''encoder.patch_embed.proj''', '''embeddings.patch_embedding''', key )
    if "encoder.pos_embed" in key:
        key = re.sub(R'''encoder.pos_embed''', '''embeddings.position_embedding''', key )
    if "encoder.cls_token" in key:
        key = re.sub(R'''encoder.cls_token''', '''embeddings.class_embedding''', key )
    if "self_attn" in key:
        key = re.sub(R'''self_attn.proj''', '''self_attn.projection''', key )
    return key
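# Worked example (illustrative): applying the rules above to an original BLIP key
#   rename_key("visual_encoder.blocks.0.attn.proj.weight")
# yields
#   "vision_model.encoder.layers.0.self_attn.projection.weight"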
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
'''simple docstring'''
if config_path is not None:
_lowerCamelCase : str = BlipConfig.from_pretrained(config_path )
else:
_lowerCamelCase : Any = BlipConfig(projection_dim=5_12, text_config={}, vision_config={} )
_lowerCamelCase : Optional[int] = BlipForConditionalGeneration(A_ ).eval()
_lowerCamelCase : Union[str, Any] = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'''
_lowerCamelCase : Tuple = blip_decoder(pretrained=A_, image_size=3_84, vit='''base''' )
_lowerCamelCase : str = pt_model.eval()
_lowerCamelCase : Tuple = pt_model.state_dict()
for key in modified_state_dict.copy():
_lowerCamelCase : int = modified_state_dict.pop(A_ )
_lowerCamelCase : int = rename_key(A_ )
_lowerCamelCase : List[str] = value
hf_model.load_state_dict(A_ )
_lowerCamelCase : Union[str, Any] = 3_84
_lowerCamelCase : Optional[int] = load_demo_image(image_size=A_, device='''cpu''' )
_lowerCamelCase : Union[str, Any] = BertTokenizer.from_pretrained('''bert-base-uncased''' )
_lowerCamelCase : Optional[int] = tokenizer(['''a picture of'''] ).input_ids
_lowerCamelCase : int = hf_model.generate(A_, A_ )
assert out[0].tolist() == [3_05_22, 10_37, 38_61, 19_97, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
_lowerCamelCase : Dict = hf_model.generate(A_ )
assert out[0].tolist() == [3_05_22, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(A_ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
_lowerCamelCase : Union[str, Any] = (
'''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'''
)
_lowerCamelCase : Union[str, Any] = blip_vqa(pretrained=A_, image_size=A_, vit='''base''' )
vqa_model.eval()
_lowerCamelCase : Any = vqa_model.state_dict()
for key in modified_state_dict.copy():
_lowerCamelCase : Any = modified_state_dict.pop(A_ )
_lowerCamelCase : Tuple = rename_key(A_ )
_lowerCamelCase : List[str] = value
_lowerCamelCase : List[str] = BlipForQuestionAnswering(A_ )
hf_vqa_model.load_state_dict(A_ )
_lowerCamelCase : Optional[int] = ['''How many dogs are in this image?''']
_lowerCamelCase : Dict = tokenizer(A_, return_tensors='''pt''' ).input_ids
_lowerCamelCase : List[Any] = hf_vqa_model.generate(A_, A_ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' )
_lowerCamelCase : List[str] = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'''
_lowerCamelCase : Optional[int] = blip_itm(pretrained=A_, image_size=A_, vit='''base''' )
itm_model.eval()
_lowerCamelCase : Optional[int] = itm_model.state_dict()
for key in modified_state_dict.copy():
_lowerCamelCase : Any = modified_state_dict.pop(A_ )
_lowerCamelCase : Optional[Any] = rename_key(A_ )
_lowerCamelCase : str = value
_lowerCamelCase : Union[str, Any] = BlipForImageTextRetrieval(A_ )
_lowerCamelCase : int = ['''A picture of a woman with a dog sitting in a beach''']
_lowerCamelCase : Optional[Any] = tokenizer(
A_, return_tensors='''pt''', padding='''max_length''', truncation=A_, max_length=35, ).input_ids
hf_itm_model.load_state_dict(A_ )
hf_itm_model.eval()
_lowerCamelCase : Union[str, Any] = hf_itm_model(A_, A_, use_itm_head=A_ )
_lowerCamelCase : str = hf_itm_model(A_, A_, use_itm_head=A_ )
assert out[0].item() == 0.2110687494277954
assert torch.nn.functional.softmax(out_itm[0], dim=1 )[:, 1].item() == 0.45698845386505127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
lowerCAmelCase__ = parser.parse_args()
convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
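# Example invocation (illustrative; the script filename is an assumption):
#   python convert_blip_original_pytorch_to_hf.py \
#       --pytorch_dump_folder_path ./blip-base --config_path ./blip_config.json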
| 83 |
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path):
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
UpperCAmelCase_: List[str] = TapasConfig.from_json_file(tapas_config_file )
# set absolute/relative position embeddings parameter
UpperCAmelCase_: List[str] = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
UpperCAmelCase_: str = TapasForQuestionAnswering(config=_a )
elif task == "WTQ":
# run_task_main.py hparams
UpperCAmelCase_: Any = 4
UpperCAmelCase_: List[str] = True
# hparam_utils.py hparams
UpperCAmelCase_: Dict = 0.664_694
UpperCAmelCase_: int = 0.207_951
UpperCAmelCase_: int = 0.121_194
UpperCAmelCase_: Any = True
UpperCAmelCase_: Union[str, Any] = True
UpperCAmelCase_: List[str] = False
UpperCAmelCase_: Optional[int] = 0.0_352_513
UpperCAmelCase_: List[str] = TapasForQuestionAnswering(config=_a )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
UpperCAmelCase_: Tuple = 4
UpperCAmelCase_: Dict = False
# hparam_utils.py hparams
UpperCAmelCase_: Optional[int] = 36.4_519
UpperCAmelCase_: List[str] = 0.903_421
UpperCAmelCase_: List[str] = 222.088
UpperCAmelCase_: Any = True
UpperCAmelCase_: str = True
UpperCAmelCase_: Dict = True
UpperCAmelCase_: Union[str, Any] = 0.763_141
UpperCAmelCase_: int = TapasForQuestionAnswering(config=_a )
elif task == "TABFACT":
UpperCAmelCase_: Optional[Any] = TapasForSequenceClassification(config=_a )
elif task == "MLM":
UpperCAmelCase_: str = TapasForMaskedLM(config=_a )
elif task == "INTERMEDIATE_PRETRAINING":
UpperCAmelCase_: List[Any] = TapasModel(config=_a )
else:
raise ValueError(f"Task {task} not supported." )
print(f"Building PyTorch model from configuration: {config}" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(_a ,_a ,_a )
# Save pytorch-model (weights and configuration)
print(f"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(_a )
# Save tokenizer files
print(f"Save tokenizer files to {pytorch_dump_path}" )
UpperCAmelCase_: Any = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt" ,model_max_length=512 )
tokenizer.save_pretrained(_a )
print("Used relative position embeddings:" ,model.config.reset_position_index_per_cell )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_lowerCAmelCase = parser.parse_args()
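    # Example invocation (illustrative; the script filename is an assumption):
    #   python convert_tapas_original_tf_checkpoint_to_pytorch.py --task WTQ \
    #       --reset_position_index_per_cell --tf_checkpoint_path ./tapas_wtq/model.ckpt \
    #       --tapas_config_file ./tapas_wtq/config.json --pytorch_dump_path ./tapas_wtq_pt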
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
 | 137 | 0 |
lowerCamelCase ={"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
lowerCamelCase =["a", "b", "c", "d", "e"]
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
UpperCamelCase__ : str = start
# add current to visited
visited.append(UpperCamelCase__ )
UpperCamelCase__ : int = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
UpperCamelCase__ : int = topological_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# if all neighbors visited add current to sort
sort.append(UpperCamelCase__ )
# if all vertices haven't been visited select a new one to visit
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
for vertice in vertices:
if vertice not in visited:
UpperCamelCase__ : Optional[int] = topological_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# return sort
return sort
if __name__ == "__main__":
lowerCamelCase =topological_sort("a", [], [])
print(sort)
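# Iterative alternative (illustrative, not part of the original snippet): Kahn's
# algorithm repeatedly emits vertices whose in-degree has dropped to zero. It
# yields vertices before their out-neighbors, whereas the DFS version above
# appends each vertex only after all of its neighbors.
from collections import deque

def kahn_topological_sort(graph: dict) -> list:
    indegree = {vertex: 0 for vertex in graph}
    for neighbors in graph.values():
        for neighbor in neighbors:
            indegree[neighbor] += 1
    queue = deque(vertex for vertex, degree in indegree.items() if degree == 0)
    order = []
    while queue:
        vertex = queue.popleft()
        order.append(vertex)
        for neighbor in graph[vertex]:
            indegree[neighbor] -= 1
            if indegree[neighbor] == 0:
                queue.append(neighbor)
    # if len(order) < len(graph), the graph contains a cycle
    return order

# kahn_topological_sort(edges) -> ['a', 'c', 'b', 'd', 'e']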
| 462 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowerCamelCase =logging.get_logger(__name__)
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , UpperCamelCase__ ):
if "xprophetnet" in prophetnet_checkpoint_path:
UpperCamelCase__ : int = XLMProphetNetForConditionalGenerationOld.from_pretrained(UpperCamelCase__ )
UpperCamelCase__ ,UpperCamelCase__ : Union[str, Any] = XLMProphetNetForConditionalGeneration.from_pretrained(
UpperCamelCase__ , output_loading_info=UpperCamelCase__ )
else:
UpperCamelCase__ : Tuple = ProphetNetForConditionalGenerationOld.from_pretrained(UpperCamelCase__ )
UpperCamelCase__ ,UpperCamelCase__ : Union[str, Any] = ProphetNetForConditionalGeneration.from_pretrained(
UpperCamelCase__ , output_loading_info=UpperCamelCase__ )
UpperCamelCase__ : Optional[Any] = ['''key_proj''', '''value_proj''', '''query_proj''']
UpperCamelCase__ : Tuple = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
for key in loading_info["missing_keys"]:
UpperCamelCase__ : Tuple = key.split('''.''' )
if attributes[0] == "lm_head":
UpperCamelCase__ : Union[str, Any] = prophet
UpperCamelCase__ : int = prophet_old
else:
UpperCamelCase__ : int = prophet.prophetnet
UpperCamelCase__ : str = prophet_old.model
UpperCamelCase__ : Tuple = False
for attribute in attributes:
if attribute in mapping:
UpperCamelCase__ : List[str] = mapping[attribute]
if not hasattr(UpperCamelCase__ , UpperCamelCase__ ) and len(UpperCamelCase__ ) > 0:
UpperCamelCase__ : Tuple = attribute
elif hasattr(UpperCamelCase__ , UpperCamelCase__ ):
UpperCamelCase__ : Tuple = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
UpperCamelCase__ : Dict = old_model.weight
logger.info(f'''{attribute} is initialized.''' )
UpperCamelCase__ : Optional[Any] = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
UpperCamelCase__ : Any = old_model.bias
logger.info(f'''{attribute} is initialized''' )
UpperCamelCase__ : Any = True
break
elif attribute in special_keys and hasattr(UpperCamelCase__ , '''in_proj_weight''' ):
UpperCamelCase__ : Optional[int] = old_model.in_proj_weight.shape[0] // 3
UpperCamelCase__ : Optional[Any] = getattr(UpperCamelCase__ , UpperCamelCase__ )
assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
UpperCamelCase__ : Optional[Any] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
UpperCamelCase__ : str = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
UpperCamelCase__ : Union[str, Any] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
UpperCamelCase__ : Optional[int] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
UpperCamelCase__ : List[str] = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
UpperCamelCase__ : Tuple = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
UpperCamelCase__ : Tuple = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_1_2, "We want 512 position_embeddings."
UpperCamelCase__ : Optional[Any] = nn.Parameter(old_model.embed_positions.weight[:5_1_2, :] )
UpperCamelCase__ : List[Any] = True
break
if attribute.isdigit():
UpperCamelCase__ : int = model[int(UpperCamelCase__ )]
UpperCamelCase__ : int = old_model[int(UpperCamelCase__ )]
else:
UpperCamelCase__ : str = getattr(UpperCamelCase__ , UpperCamelCase__ )
if old_attribute == "":
UpperCamelCase__ : List[str] = old_model
else:
if not hasattr(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError(f'''{old_model} does not have {old_attribute}''' )
UpperCamelCase__ : List[Any] = getattr(UpperCamelCase__ , UpperCamelCase__ )
if not is_key_init:
raise ValueError(f'''{key} was not correctly initialized!''' )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCamelCase =parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 462 | 1 |
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    '''simple docstring'''
    return np.array_equal(matrix, matrix.conjugate().T)
def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    '''simple docstring'''
    # Rayleigh quotient: R(a, v) = (v* a v) / (v* v), where v* is the conjugate transpose of v
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
def tests() -> None:
    '''simple docstring'''
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f'{a} is not hermitian.'
    print(rayleigh_quotient(a, v))
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f'{a} is not hermitian.'
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    tests()
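# Aside (illustrative, not part of the original): for a Hermitian matrix the
# Rayleigh quotient is real and bounded by the extreme eigenvalues, e.g.
#   a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
#   v = np.array([[1], [2], [3]])
#   w = np.linalg.eigvalsh(a)
#   assert w.min() - 1e-9 <= rayleigh_quotient(a, v).real <= w.max() + 1e-9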
| 668 |
def odd_even_transposition(arr: list) -> list:
    '''simple docstring'''
    # A parallel-friendly variant of bubble sort: alternate compare-and-swap
    # passes over even- and odd-indexed pairs; n passes suffice for n elements.
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(F'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
| 668 | 1 |
"""simple docstring"""
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable) -> Callable:
    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            (F"""'{fn.__name__}' is experimental and might be subject to breaking changes in the future.""") , UserWarning , )
        return fn(*args , **kwargs)
    return _inner_fn
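# Illustrative usage (not in the original file):
#
#   @experimental
#   def new_feature(x):
#       return x * 2
#
#   new_feature(3)  # emits "'new_feature' is experimental ..." as a UserWarning, returns 6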
| 475 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __magic_name__ ( __UpperCAmelCase ):
__A : UNetaDModel
__A : ScoreSdeVeScheduler
def __init__( self : Optional[Any] , snake_case__ : UNetaDModel , snake_case__ : ScoreSdeVeScheduler ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=snake_case__ , scheduler=snake_case__ )
@torch.no_grad()
def __call__( self : Tuple , snake_case__ : int = 1 , snake_case__ : int = 2_0_0_0 , snake_case__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , **snake_case__ : Optional[Any] , ):
'''simple docstring'''
lowercase :str = self.unet.config.sample_size
lowercase :str = (batch_size, 3, img_size, img_size)
lowercase :Any = self.unet
lowercase :Dict = randn_tensor(snake_case__ , generator=snake_case__ ) * self.scheduler.init_noise_sigma
lowercase :int = sample.to(self.device )
self.scheduler.set_timesteps(snake_case__ )
self.scheduler.set_sigmas(snake_case__ )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowercase :Optional[int] = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
lowercase :str = self.unet(snake_case__ , snake_case__ ).sample
lowercase :Optional[Any] = self.scheduler.step_correct(snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample
# prediction step
lowercase :Optional[Any] = model(snake_case__ , snake_case__ ).sample
lowercase :Dict = self.scheduler.step_pred(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ )
lowercase , lowercase :Tuple = output.prev_sample, output.prev_sample_mean
lowercase :str = sample_mean.clamp(0 , 1 )
lowercase :int = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase :str = self.numpy_to_pil(snake_case__ )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=snake_case__ )
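# Illustrative usage (class and checkpoint names are assumptions, not from this file):
#   from diffusers import ScoreSdeVePipeline
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#   image = pipe(num_inference_steps=2000).images[0]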
| 475 | 1 |
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase :
"""simple docstring"""
def __init__( self : List[Any] , a_ : List[Any] , a_ : Optional[Any]=3 , a_ : int=32 , a_ : List[str]=3 , a_ : Any=10 , a_ : Optional[int]=[10, 20, 30, 40] , a_ : Tuple=[1, 1, 2, 1] , a_ : int=True , a_ : Union[str, Any]=True , a_ : Optional[int]="relu" , a_ : int=3 , a_ : Tuple=None , ):
"""simple docstring"""
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = image_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = embeddings_size
lowerCamelCase__ = hidden_sizes
lowerCamelCase__ = depths
lowerCamelCase__ = is_training
lowerCamelCase__ = use_labels
lowerCamelCase__ = hidden_act
lowerCamelCase__ = num_labels
lowerCamelCase__ = scope
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase__ = self.get_config()
return config, pixel_values, labels
def _UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def _UpperCamelCase ( self : int , a_ : int , a_ : Any , a_ : List[Any] ):
"""simple docstring"""
lowerCamelCase__ = RegNetModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _UpperCamelCase ( self : Any , a_ : Tuple , a_ : Optional[int] , a_ : Optional[Any] ):
"""simple docstring"""
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = RegNetForImageClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
lowerCamelCase__ = self.prepare_config_and_inputs()
lowerCamelCase__ = config_and_inputs
lowerCamelCase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
snake_case_ = (
{'feature-extraction': RegNetModel, 'image-classification': RegNetForImageClassification}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def _UpperCamelCase ( self : str ):
"""simple docstring"""
lowerCamelCase__ = RegNetModelTester(self )
lowerCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCamelCase ( self : Any ):
"""simple docstring"""
return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def _UpperCamelCase ( self : Any ):
"""simple docstring"""
pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def _UpperCamelCase ( self : Tuple ):
"""simple docstring"""
pass
def _UpperCamelCase ( self : Any ):
"""simple docstring"""
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ = [*signature.parameters.keys()]
lowerCamelCase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ = model_class(config=SCREAMING_SNAKE_CASE__ )
for name, module in model.named_modules():
if isinstance(SCREAMING_SNAKE_CASE__ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def _UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
def check_hidden_states_output(a_ : Tuple , a_ : List[str] , a_ : Any ):
lowerCamelCase__ = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
lowerCamelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
lowerCamelCase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase__ = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCamelCase__ = layer_type
lowerCamelCase__ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = RegNetModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def prepare_img():
    '''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _UpperCamelCase ( self : str ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _UpperCamelCase ( self : List[str] ):
"""simple docstring"""
lowerCamelCase__ = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.default_image_processor
lowerCamelCase__ = prepare_img()
lowerCamelCase__ = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
lowerCamelCase__ = model(**SCREAMING_SNAKE_CASE__ )
# verify the logits
lowerCamelCase__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = torch.tensor([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
| 165 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
'''simple docstring'''
rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
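# Illustrative: for layer 0 the loop above produces pairs such as
#   ('blocks.0.attn.proj.weight', 'vit.encoder.layer.0.attention.output.dense.weight')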
def read_in_q_k_v(state_dict, config, base_model=False):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
snake_case: Union[str, Any] = ''
else:
snake_case: Optional[int] = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case: Optional[Any] = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
snake_case: Union[str, Any] = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
snake_case: Optional[int] = in_proj_weight[
: config.hidden_size, :
]
snake_case: Dict = in_proj_bias[: config.hidden_size]
snake_case: Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case: Union[str, Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case: Tuple = in_proj_weight[
-config.hidden_size :, :
]
snake_case: List[str] = in_proj_bias[-config.hidden_size :]
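# For example, for layer 0 the single timm projection `blocks.0.attn.qkv.weight`,
# of shape (3 * hidden_size, hidden_size), is split row-wise into the separate
# query, key and value weights of `vit.encoder.layer.0.attention.attention.*`.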
def remove_classification_head_(state_dict ):
    '''simple docstring'''
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct, old, new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    '''simple docstring'''
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path ):
    '''simple docstring'''
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10] )
        config.image_size = int(vit_name[-9:-6] )
    else:
        config.num_labels = 10_00
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-1k-id2label.json'
        idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
        idalabel = {int(k ): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
        config.patch_size = int(vit_name[-6:-4] )
        config.image_size = int(vit_name[-3:] )
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith('tiny' ):
            config.hidden_size = 1_92
            config.intermediate_size = 7_68
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith('small' ):
            config.hidden_size = 3_84
            config.intermediate_size = 15_36
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith('small' ):
            config.hidden_size = 7_68
            config.intermediate_size = 23_04
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith('base' ):
            pass
        elif vit_name[4:].startswith('large' ):
            config.hidden_size = 10_24
            config.intermediate_size = 40_96
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith('huge' ):
            config.hidden_size = 12_80
            config.intermediate_size = 51_20
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config ).eval()
    else:
        model = ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size )
    else:
        image_processor = ViTImageProcessor(size=config.image_size )
    encoding = image_processor(images=prepare_img() , return_tensors='pt' )
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1E-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
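# Example invocation (sketch with a hypothetical script filename and output path;
# the --vit_name value must be a valid timm model id):
#   python convert_vit_timm_to_pytorch.py --vit_name vit_base_patch16_224 \
#       --pytorch_dump_folder_path ./converted-vit-base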
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_patch16_224",
type=str,
help="Name of the ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 329 | 0 |
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
logger = logging.getLogger(__name__)
def save_model(model, dirpath ):
    """simple docstring"""
    if os.path.exists(dirpath ):
        if os.path.exists(os.path.join(dirpath , 'config.json' ) ) and os.path.isfile(
            os.path.join(dirpath , 'config.json' ) ):
            os.remove(os.path.join(dirpath , 'config.json' ) )
        if os.path.exists(os.path.join(dirpath , 'pytorch_model.bin' ) ) and os.path.isfile(
            os.path.join(dirpath , 'pytorch_model.bin' ) ):
            os.remove(os.path.join(dirpath , 'pytorch_model.bin' ) )
    else:
        os.makedirs(dirpath )
    model.save_pretrained(dirpath )
def entropy(p, unlogit=False ):
    """simple docstring"""
    exponent = 2
    if unlogit:
        p = torch.pow(p , exponent )
    plogp = p * torch.log(p )
    plogp[p == 0] = 0
return -plogp.sum(dim=-1 )
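# Worked example (standard result, not from the original script): the entropy of a
# uniform distribution over four outcomes is log(4) ≈ 1.386, i.e.
#   entropy(torch.full((4,), 0.25)) -> tensor(1.3863)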
def print_ad_tensor(tensor ):
    """simple docstring"""
    logger.info('lv, h >\t' + '\t'.join(f'{x + 1}' for x in range(len(tensor ) ) ) )
    for row in range(len(tensor ) ):
if tensor.dtype != torch.long:
logger.info(f'layer {row + 1}:\t' + '\t'.join(f'{x:.5f}' for x in tensor[row].cpu().data ) )
else:
logger.info(f'layer {row + 1}:\t' + '\t'.join(f'{x:d}' for x in tensor[row].cpu().data ) )
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """simple docstring"""
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers , n_heads ).to(args.device )
    attn_entropy = torch.zeros(n_layers , n_heads ).to(args.device )
    if head_mask is None:
        head_mask = torch.ones(n_layers , n_heads ).to(args.device )
    head_mask.requires_grad_(requires_grad=True )
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None
    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
        inputs = tuple(t.to(args.device ) for t in inputs )
        (input_ids,) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids , labels=input_ids , head_mask=head_mask )
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions ):
                masked_entropy = entropy(attn.detach() , True )
                attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids ).float().detach().sum().data
    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance , exponent ).sum(-1 ) , 1 / exponent )
        head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print matrices
    if compute_entropy:
        logger.info('Attention entropies' )
        print_ad_tensor(attn_entropy )
    if compute_importance:
        logger.info('Head importance scores' )
        print_ad_tensor(head_importance )
    logger.info('Head ranked by importance scores' )
    head_ranks = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
    head_ranks[head_importance.view(-1 ).sort(descending=True )[1]] = torch.arange(
        head_importance.numel() , device=args.device )
    head_ranks = head_ranks.view_as(head_importance )
    print_ad_tensor(head_ranks )
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader ):
    """simple docstring"""
    _, head_importance, loss = compute_heads_importance(args , model , eval_dataloader , compute_entropy=False )
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info('Pruning: original score: %f, threshold: %f' , original_score , original_score * args.masking_threshold )
    new_head_mask = torch.ones_like(head_importance )
    num_to_mask = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float('Inf' )
        current_heads_to_mask = head_importance.view(-1 ).sort()[1]
        if len(current_heads_to_mask ) <= num_to_mask:
            print('BREAK BY num_to_mask' )
            break
        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
        new_head_mask = new_head_mask.view(-1 )
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask )
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask )
        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args , model , eval_dataloader , compute_entropy=False , head_mask=new_head_mask )
        current_score = 1 / loss
        logger.info(
            'Masking: current score: %f, remaining heads %d (%.1f percents)' , current_score , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_0_0 , )
    logger.info('Final head mask' )
    print_ad_tensor(head_mask )
    np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask ):
    """simple docstring"""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=head_mask )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time
    original_num_params = sum(p.numel() for p in model.parameters() )
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask ) )
    }
    for k, v in heads_to_prune.items():
        if isinstance(v , int ):
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune )
    pruned_num_params = sum(p.numel() for p in model.parameters() )
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=None , actually_pruned=True , )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time
    logger.info(
        'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , original_num_params , pruned_num_params , pruned_num_params / original_num_params * 1_0_0 , )
    logger.info('Pruning: score with masking: %f score with pruning: %f' , score_masking , score_pruning )
    logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 1_0_0 )
    save_model(model , args.output_dir )
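# For reference, `model.prune_heads` expects a mapping {layer_index: [head_indices]},
# e.g. {0: [0, 2], 5: [1]} removes heads 0 and 2 of layer 0 and head 1 of layer 5.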
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--data_dir' , default=None , type=str , required=True , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
    parser.add_argument(
        '--model_name_or_path' , default=None , type=str , required=True , help='Path to pretrained model or model identifier from huggingface.co/models' , )
    parser.add_argument(
        '--output_dir' , default=None , type=str , required=True , help='The output directory where the model predictions and checkpoints will be written.' , )
    # Other parameters
    parser.add_argument(
        '--config_name' , default='' , type=str , help='Pretrained config name or path if not the same as model_name_or_path' , )
    parser.add_argument(
        '--tokenizer_name' , default='' , type=str , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
    parser.add_argument(
        '--cache_dir' , default=None , type=str , help='Where do you want to store the pre-trained models downloaded from s3' , )
    parser.add_argument(
        '--data_subset' , type=int , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
    parser.add_argument(
        '--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
    parser.add_argument(
        '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
    parser.add_argument(
        '--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
    parser.add_argument(
        '--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
    parser.add_argument(
        '--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
    parser.add_argument(
        '--masking_threshold' , default=0.9 , type=float , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , )
    parser.add_argument(
        '--masking_amount' , default=0.1 , type=float , help='Amount to heads to masking at each masking step.' )
    parser.add_argument('--metric_name' , default='acc' , type=str , help='Metric to use for head masking.' )
    parser.add_argument(
        '--max_seq_length' , default=1_2_8 , type=int , help=(
            'The maximum total input sequence length after WordPiece tokenization. \n'
            'Sequences longer than this will be truncated, sequences shorter padded.'
        ) , )
    parser.add_argument('--batch_size' , default=1 , type=int , help='Batch size.' )
    parser.add_argument('--seed' , type=int , default=4_2 )
    parser.add_argument('--local_rank' , type=int , default=-1 , help='local_rank for distributed training on gpus' )
    parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
    parser.add_argument('--server_ip' , type=str , default='' , help='Can be used for distant debugging.' )
    parser.add_argument('--server_port' , type=str , default='' , help='Can be used for distant debugging.' )
    args = parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print('Waiting for debugger attach' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=True )
        ptvsd.wait_for_attach()
    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank )
        args.device = torch.device('cuda' , args.local_rank )
        args.n_gpu = 1
        torch.distributed.init_process_group(backend='nccl' )  # Initializes the distributed backend
    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
    logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    model = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
    # Distributed and parallel training
    model.to(args.device )
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=True )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model )
    # Print/save training arguments
    os.makedirs(args.output_dir , exist_ok=True )
    torch.save(args , os.path.join(args.output_dir , 'run_args.bin' ) )
    logger.info('Training/evaluation parameters %s' , args )
    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir , dtype=np.intaa ),
        ] )
    train_tensor_dataset = (torch.from_numpy(numpy_data ),)
    train_data = TensorDataset(*train_tensor_dataset )
    train_sampler = RandomSampler(train_data )
    eval_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.batch_size )
    # Compute head entropy and importance score
    compute_heads_importance(args , model , eval_dataloader )
    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args , model , eval_dataloader )
        prune_heads(args , model , eval_dataloader , head_mask )
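# Example invocation (hypothetical paths; `--data_dir` should point to a text file of
# whitespace-separated token ids that `np.loadtxt` can parse):
#   python run_prune_gpt.py --model_name_or_path gpt2 --data_dir ./token_ids.txt \
#       --output_dir ./pruned --try_masking --masking_threshold 0.9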
if __name__ == "__main__":
main()
| 630 |
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]
def next_number(number ):
    """simple docstring"""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 1_0_0_0_0_0]
        number //= 1_0_0_0_0_0
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_00_00_00
CHAINS[0] = True
CHAINS[57] = False
def chain(number ):
    """simple docstring"""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain
    while number < 1_0_0_0_0_0_0_0:
        CHAINS[number - 1] = number_chain
        number *= 1_0
    return number_chain
def solution(number = 1_0_0_0_0_0_0_0 ):
    """simple docstring"""
    for i in range(1 , number ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(False )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution() = }''')
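    # Quick check with chain values from the Project Euler 92 statement:
    # 44 -> 32 (4**2 + 4**2) and 85 -> 89 (8**2 + 5**2).
    assert next_number(44) == 32
    assert next_number(85) == 89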
| 630 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_fnet''': ['''FNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_fnet'''] = ['''FNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_fnet_fast'''] = ['''FNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_fnet'''] = [
'''FNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FNetForMaskedLM''',
'''FNetForMultipleChoice''',
'''FNetForNextSentencePrediction''',
'''FNetForPreTraining''',
'''FNetForQuestionAnswering''',
'''FNetForSequenceClassification''',
'''FNetForTokenClassification''',
'''FNetLayer''',
'''FNetModel''',
'''FNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 156 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState
    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None
    @classmethod
    def create(cls , common , init_noise_sigma , timesteps ):
        '''simple docstring'''
        return cls(common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps )
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput ):
    state: DDPMSchedulerState
class FlaxDDPMScheduler(FlaxSchedulerMixin , ConfigMixin ):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]
    dtype: jnp.dtype
    @property
    def has_state(self ):
        '''simple docstring'''
        return True
@register_to_config
    def __init__(self , num_train_timesteps = 1_000 , beta_start = 0.0001 , beta_end = 0.02 , beta_schedule = "linear" , trained_betas = None , variance_type = "fixed_small" , clip_sample = True , prediction_type = "epsilon" , dtype = jnp.floataa , ):
        '''simple docstring'''
        self.dtype = dtype
    def create_state(self , common = None ):
        '''simple docstring'''
        if common is None:
            common = CommonSchedulerState.create(self )
        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0 , dtype=self.dtype )
        timesteps = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
        return DDPMSchedulerState.create(
            common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps , )
    def scale_model_input(self , state , sample , timestep = None ):
'''simple docstring'''
return sample
    def set_timesteps(self , state , num_inference_steps , shape = () ):
        '''simple docstring'''
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0 , num_inference_steps ) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps , timesteps=timesteps , )
    def _get_variance(self , state , t , predicted_variance=None , variance_type=None ):
        '''simple docstring'''
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance , a_min=1e-20 )
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance , a_min=1e-20 ) )
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t] )
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step(self , state , model_output , timestep , sample , key = None , return_dict = True , ):
        '''simple docstring'''
        t = timestep
        if key is None:
            key = jax.random.PRNGKey(0 )
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output , sample.shape[1] , axis=1 )
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
                """ for the FlaxDDPMScheduler.""" )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample , -1 , 1 )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key , num=1 )
            noise = jax.random.normal(split_key , shape=model_output.shape , dtype=self.dtype )
            return (self._get_variance(state , t , predicted_variance=predicted_variance ) ** 0.5) * noise
        variance = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample , state=state )
    def add_noise(self , state , original_samples , noise , timesteps , ):
        '''simple docstring'''
        return add_noise_common(state.common , original_samples , noise , timesteps )
    def get_velocity(self , state , sample , noise , timesteps , ):
        '''simple docstring'''
        return get_velocity_common(state.common , sample , noise , timesteps )
def __len__(self ):
'''simple docstring'''
        return self.config.num_train_timesteps
| 156 | 1 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase ):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ) -> List[str]:
        example_video_filepath = hf_hub_download(
            repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
        video_classifier = VideoClassificationPipeline(model=model , image_processor=processor , top_k=2 )
        examples = [
            example_video_filepath,
            """https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4""",
        ]
        return video_classifier, examples
    def run_pipeline_test( self , video_classifier , examples ) -> List[Any]:
        for example in examples:
            outputs = video_classifier(example )
            self.assertEqual(
                outputs , [
                    {"""score""": ANY(float ), """label""": ANY(str )},
                    {"""score""": ANY(float ), """label""": ANY(str )},
                ] , )
@require_torch
    def test_small_model_pt( self ) -> int:
        small_model = """hf-internal-testing/tiny-random-VideoMAEForVideoClassification"""
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"""shortest_edge""": 10} , crop_size={"""height""": 10, """width""": 10} )
        video_classifier = pipeline(
            """video-classification""" , model=small_model , feature_extractor=small_feature_extractor , frame_sampling_rate=4 )
        video_file_path = hf_hub_download(repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
        outputs = video_classifier(video_file_path , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}] , )
        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ] , top_k=2 , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}],
                [{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}],
            ] , )
@require_tf
    def test_small_model_tf( self ) -> Dict:
pass | 161 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'''
),
'''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''',
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'''
),
'''google/electra-base-generator''': (
'''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'''
),
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/electra-small-generator''': 512,
'''google/electra-base-generator''': 512,
'''google/electra-large-generator''': 512,
'''google/electra-small-discriminator''': 512,
'''google/electra-base-discriminator''': 512,
'''google/electra-large-discriminator''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/electra-small-generator''': {'''do_lower_case''': True},
'''google/electra-base-generator''': {'''do_lower_case''': True},
'''google/electra-large-generator''': {'''do_lower_case''': True},
'''google/electra-small-discriminator''': {'''do_lower_case''': True},
'''google/electra-base-discriminator''': {'''do_lower_case''': True},
'''google/electra-large-discriminator''': {'''do_lower_case''': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 161 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@property
    def dummy_uncond_unet( self : Dict ) -> Optional[Any]:
        torch.manual_seed(0 )
        model = UNetaDModel(
            block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=('DownBlock2D', 'AttnDownBlock2D') ,up_block_types=('AttnUpBlock2D', 'UpBlock2D') ,)
        return model
@property
    def dummy_vq_model( self : str ) -> Dict:
        torch.manual_seed(0 )
        model = VQModel(
            block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=3 ,)
        return model
@property
    def dummy_text_encoder( self : int ) -> Dict:
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
        return CLIPTextModel(config )
    def test_inference_uncond( self : str ) -> List[Any]:
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model
        ldm = LDMPipeline(unet=unet ,vqvae=vae ,scheduler=scheduler )
        ldm.to(torch_device )
        ldm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ldm(generator=generator ,num_inference_steps=2 ,output_type='numpy' ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = ldm(generator=generator ,num_inference_steps=2 ,output_type='numpy' ,return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
        tolerance = 1e-2 if torch_device != 'mps' else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    def test_inference_uncond( self : str ) -> str:
        ldm = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
        ldm.to(torch_device )
        ldm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ldm(generator=generator ,num_inference_steps=5 ,output_type='numpy' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
        tolerance = 1e-2 if torch_device != 'mps' else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 125 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
A = logging.get_logger(__name__)
# TODO: upload to AWS
A = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class RetriBertConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = "retribert"
    def __init__( self ,vocab_size=3_0522 ,hidden_size=768 ,num_hidden_layers=8 ,num_attention_heads=12 ,intermediate_size=3072 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=2 ,initializer_range=0.0_2 ,layer_norm_eps=1e-12 ,share_encoders=True ,projection_dim=128 ,pad_token_id=0 ,**kwargs ,) -> Dict:
        super().__init__(pad_token_id=pad_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 125 | 1 |
def solution(n = 6_0_0_8_5_1_4_7_5_1_4_3 ) -> int:
    '''simple docstring'''
    try:
        n = int(n )
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''' )
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''' )
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans )
if __name__ == "__main__":
print(f"{solution() = }")
| 713 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image( self ):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
        return image
@property
    def dummy_cond_unet( self ):
        torch.manual_seed(0 )
        model = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        return model
@property
    def dummy_vae( self ):
        torch.manual_seed(0 )
        model = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        return model
@property
    def dummy_text_encoder( self ):
        torch.manual_seed(0 )
        config = RobertaSeriesConfig(
            hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
        return RobertaSeriesModelWithTransformation(config )
@property
    def dummy_extractor( self ):
        def extract(*args , **kwargs ):
            class Out:
                def __init__( self ):
                    self.pixel_values = torch.ones([0] )
                def to( self , device ):
                    self.pixel_values.to(device )
                    return self
            return Out()
        return extract
def UpperCamelCase ( self ):
lowercase_ :List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase_ :Dict = self.dummy_cond_unet
lowercase_ :Optional[Any] = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
lowercase_ :Union[str, Any] = self.dummy_vae
lowercase_ :Any = self.dummy_text_encoder
lowercase_ :List[Any] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowercase_ :List[str] = 77
lowercase_ :int = self.dummy_image.to(UpperCamelCase_ )
lowercase_ :Optional[Any] = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowercase_ :str = AltDiffusionImgaImgPipeline(
unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , safety_checker=UpperCamelCase_ , feature_extractor=self.dummy_extractor , )
lowercase_ :List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCamelCase_ )
lowercase_ :int = alt_pipe.to(UpperCamelCase_ )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase_ :Any = '''A painting of a squirrel eating a burger'''
lowercase_ :Tuple = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
lowercase_ :Optional[int] = alt_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=UpperCamelCase_ , )
lowercase_ :Dict = output.images
lowercase_ :Tuple = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
lowercase_ :Optional[int] = alt_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=UpperCamelCase_ , return_dict=UpperCamelCase_ , )[0]
lowercase_ :Optional[int] = image[0, -3:, -3:, -1]
lowercase_ :Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase_ :int = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCamelCase ( self ):
lowercase_ :List[str] = self.dummy_cond_unet
lowercase_ :Dict = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
lowercase_ :Tuple = self.dummy_vae
lowercase_ :Dict = self.dummy_text_encoder
lowercase_ :Tuple = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowercase_ :str = 77
lowercase_ :str = self.dummy_image.to(UpperCamelCase_ )
# put models in fp16
lowercase_ :Union[str, Any] = unet.half()
lowercase_ :Union[str, Any] = vae.half()
lowercase_ :List[str] = bert.half()
# make sure here that pndm scheduler skips prk
lowercase_ :List[Any] = AltDiffusionImgaImgPipeline(
unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , safety_checker=UpperCamelCase_ , feature_extractor=self.dummy_extractor , )
lowercase_ :List[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCamelCase_ )
lowercase_ :List[str] = alt_pipe.to(UpperCamelCase_ )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase_ :str = '''A painting of a squirrel eating a burger'''
lowercase_ :Union[str, Any] = torch.manual_seed(0 )
lowercase_ :Any = alt_pipe(
[prompt] , generator=UpperCamelCase_ , num_inference_steps=2 , output_type='''np''' , image=UpperCamelCase_ , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCamelCase ( self ):
lowercase_ :Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
# resize to resolution that is divisible by 8 but not 16 or 32
lowercase_ :Optional[Any] = init_image.resize((760, 504) )
lowercase_ :List[str] = '''BAAI/AltDiffusion'''
lowercase_ :Optional[Any] = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCamelCase_ , safety_checker=UpperCamelCase_ , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
lowercase_ :Optional[Any] = '''A fantasy landscape, trending on artstation'''
lowercase_ :Optional[Any] = torch.manual_seed(0 )
lowercase_ :str = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , strength=0.75 , guidance_scale=7.5 , generator=UpperCamelCase_ , output_type='''np''' , )
lowercase_ :Optional[Any] = output.images[0]
lowercase_ :Optional[int] = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
lowercase_ :Any = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self ):
lowercase_ :Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
lowercase_ :Any = init_image.resize((768, 512) )
lowercase_ :List[str] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' )
lowercase_ :List[Any] = '''BAAI/AltDiffusion'''
lowercase_ :Tuple = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCamelCase_ , safety_checker=UpperCamelCase_ , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
lowercase_ :List[str] = '''A fantasy landscape, trending on artstation'''
lowercase_ :Optional[int] = torch.manual_seed(0 )
lowercase_ :Tuple = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , strength=0.75 , guidance_scale=7.5 , generator=UpperCamelCase_ , output_type='''np''' , )
lowercase_ :int = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 441 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar('''T''')
class SegmentTree(Generic[T] ):
    def __init__( self , arr : list[T] , fnc : Callable[[T, T], T] ) -> None:
        any_type : Any | T = None
        self.N : int = len(arr )
        self.st : list[T] = [any_type for _ in range(self.N )] + arr
        self.fn = fnc
        self.build()
    def build( self ) -> None:
        for p in range(self.N - 1 , 0 , -1 ):
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
    def update( self , p : int , v : T ) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
    def query( self , l : int , r : int ) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N
        res : T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res , self.st[l] )
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res , self.st[r] )
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments():
        for i in range(len(test_array ) ):
            for j in range(i , len(test_array ) ):
                min_range = reduce(min , test_array[i : j + 1] )
                max_range = reduce(max , test_array[i : j + 1] )
                sum_range = reduce(lambda a , b : a + b , test_array[i : j + 1] )
                assert min_range == min_segment_tree.query(i , j )
                assert max_range == max_segment_tree.query(i , j )
                assert sum_range == sum_segment_tree.query(i , j )
test_all_segments()
for index, value in test_updates.items():
__A : List[str] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
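    # Single illustrative query on the updated array: after the updates above,
    # indices 0..2 hold 7, 2, 6, so the range-minimum is 2.
    assert min_segment_tree.query(0, 2) == 2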
| 231 |
"""simple docstring"""
import math
import qiskit
def lowercase ( __snake_case : int = 1 , __snake_case : int = 1 , __snake_case : int = 1 ):
if (
isinstance(__snake_case , __snake_case )
or isinstance(__snake_case , __snake_case )
or isinstance(__snake_case , __snake_case )
):
raise TypeError('''inputs must be integers.''' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError('''inputs must be positive.''' )
if (
(math.floor(__snake_case ) != input_a)
or (math.floor(__snake_case ) != input_a)
or (math.floor(__snake_case ) != carry_in)
):
raise ValueError('''inputs must be exact integers.''' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError('''inputs must be less or equal to 2.''' )
# build registers
lowercase_ : List[Any] = qiskit.QuantumRegister(4 , '''qr''' )
lowercase_ : Dict = qiskit.ClassicalRegister(2 , '''cr''' )
# list the entries
lowercase_ : Optional[Any] = [input_a, input_a, carry_in]
lowercase_ : List[str] = qiskit.QuantumCircuit(__snake_case , __snake_case )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(__snake_case ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(__snake_case ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(__snake_case ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , __snake_case ) # measure the last two qbits
lowercase_ : List[str] = qiskit.Aer.get_backend('''aer_simulator''' )
lowercase_ : Optional[int] = qiskit.execute(__snake_case , __snake_case , shots=1_0_0_0 )
return job.result().get_counts(__snake_case )
if __name__ == "__main__":
print(F"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
| 231 | 1 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict ):
    ignore_keys = [
        """encoder.version""",
        """decoder.version""",
        """model.encoder.version""",
        """model.decoder.version""",
        """decoder.output_projection.weight""",
        """_float_tensor""",
        """encoder.embed_positions._float_tensor""",
        """decoder.embed_positions._float_tensor""",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb(emb ):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path ):
    mam_aaa = torch.load(checkpoint_path , map_location="""cpu""" )
    args = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""]
    state_dict = mam_aaa["""model"""]
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict["""encoder.embed_tokens.weight"""].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , )
    state_dict["""shared.weight"""] = state_dict["""decoder.embed_tokens.weight"""]
    model = MaMaaaForConditionalGeneration(config )
    model.model.load_state_dict(state_dict , strict=False )
    model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
_lowercase = parser.parse_args()
    _lowercase = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
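# A minimal sketch of the weight tying performed by make_linear_from_emb
# above (toy sizes; the real model uses vocab_size x d_model):
from torch import nn as _demo_nn

_demo_emb = _demo_nn.Embedding(10, 4)
_demo_head = _demo_nn.Linear(4, 10, bias=False)
_demo_head.weight.data = _demo_emb.weight.data
print(_demo_head.weight.data_ptr() == _demo_emb.weight.data_ptr())  # True: shared storage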
| 709 |
'''simple docstring'''
import numpy as np
from transformers import Pipeline
def A (__lowerCamelCase :Any ):
_lowerCAmelCase = np.max(__lowerCamelCase , axis=-1 , keepdims=__lowerCamelCase )
_lowerCAmelCase = np.exp(outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=__lowerCamelCase )
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def _lowercase ( self , **_lowercase ):
"""simple docstring"""
_lowerCAmelCase = {}
if "second_text" in kwargs:
_lowerCAmelCase = kwargs["""second_text"""]
return preprocess_kwargs, {}, {}
def _lowercase ( self , _lowercase , _lowercase=None ):
"""simple docstring"""
return self.tokenizer(_lowercase , text_pair=_lowercase , return_tensors=self.framework )
def _lowercase ( self , _lowercase ):
"""simple docstring"""
return self.model(**_lowercase )
def _lowercase ( self , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = model_outputs.logits[0].numpy()
_lowerCAmelCase = softmax(_lowercase )
_lowerCAmelCase = np.argmax(_lowercase )
_lowerCAmelCase = self.model.config.idalabel[best_class]
_lowerCAmelCase = probabilities[best_class].item()
_lowerCAmelCase = logits.tolist()
return {"label": label, "score": score, "logits": logits}
| 162 | 0 |
from __future__ import annotations
def _snake_case (__lowercase):
UpperCamelCase_ = 2
UpperCamelCase_ = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(__lowercase)
if n > 1:
factors.append(__lowercase)
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
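# A readable restatement of the trial-division loop above (illustrative
# sketch with plain names; the product of the factors reconstructs n):
from math import prod

def prime_factors_demo(n: int) -> list[int]:
    factors, d = [], 2
    while d * d <= n:
        if n % d:
            d += 1
        else:
            n //= d
            factors.append(d)
    if n > 1:
        factors.append(n)
    return factors

print(prime_factors_demo(360))  # [2, 2, 2, 3, 3, 5]
assert prod(prime_factors_demo(360)) == 360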
| 23 |
import math
from datetime import datetime, timedelta
def _snake_case (__lowercase):
UpperCamelCase_ = year % 19
UpperCamelCase_ = year % 4
UpperCamelCase_ = year % 7
UpperCamelCase_ = math.floor(year / 100)
UpperCamelCase_ = math.floor((13 + 8 * leap_day_inhibits) / 25)
UpperCamelCase_ = leap_day_inhibits / 4
UpperCamelCase_ = (
15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30
UpperCamelCase_ = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
UpperCamelCase_ = (19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
UpperCamelCase_ = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 29 and days_from_phm_to_sunday == 6:
return datetime(__lowercase , 4 , 19)
elif days_to_add == 28 and days_from_phm_to_sunday == 6:
return datetime(__lowercase , 4 , 18)
else:
return datetime(__lowercase , 3 , 22) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday))
if __name__ == "__main__":
for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
snake_case__ : Dict = """will be""" if year > datetime.now().year else """was"""
print(f'Easter in {year} {tense} {gauss_easter(year)}')
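# A worked instance of the computus above for year 2000, with the mangled
# names spelled out (sketch; uses floor division for q where the code above
# uses true division -- identical here because 20 is divisible by 4):
demo_year = 2000
a = demo_year % 19                    # metonic cycle position        -> 5
b, c = demo_year % 4, demo_year % 7   # Julian leap / weekday drift   -> 0, 5
k = demo_year // 100                  # century number                -> 20
p = (13 + 8 * k) // 25                # lunar orbit correction        -> 6
q = k // 4                            # reinstalled leap days         -> 5
m = (15 - p + k - q) % 30             # secular moon shift            -> 24
n = (4 + k - q) % 7                   # century starting point        -> 5
d = (19 * a + m) % 30                 # days past March 21 to the full moon -> 29
e = (2 * b + 4 * c + 6 * d + n) % 7   # days from the full moon to Sunday   -> 3
print(d + e)  # 32: March 22 + 32 days = April 23, Easter Sunday 2000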
| 23 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def A__ ( __lowerCAmelCase : int ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
UpperCamelCase : str = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]
def A__ ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError("""n must be an integer""" )
if n <= 0:
raise ValueError("""n must be >= 0""" )
lowerCamelCase__ = []
for num in range(len(__lowerCAmelCase ) ):
lowerCamelCase__ = 0
while 2 * i * i <= odd_composites[num]:
lowerCamelCase__ = odd_composites[num] - 2 * i * i
if is_prime(__lowerCAmelCase ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(__lowerCAmelCase ) == n:
return list_nums
return []
def A__ ( ):
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F'{solution() = }')
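# A self-contained restatement of the property the search above tests
# (illustrative sketch with readable names, same logic): an odd composite
# breaks "Goldbach's other conjecture" when no i >= 1 makes n - 2*i*i prime.
def _is_prime_demo(n: int) -> bool:
    if n < 2:
        return False
    d = 2
    while d * d <= n:
        if n % d == 0:
            return False
        d += 1
    return True

def _has_goldbach_form(n: int) -> bool:
    i = 1
    while 2 * i * i < n:
        if _is_prime_demo(n - 2 * i * i):
            return True
        i += 1
    return False

print(_has_goldbach_form(33))    # True: 33 = 31 + 2 * 1**2
print(_has_goldbach_form(5777))  # False: the classic counterexample found above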
| 9 |
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
UpperCamelCase : Optional[Any] = ['small', 'medium', 'large']
UpperCamelCase : Dict = 'lm_head.decoder.weight'
UpperCamelCase : int = 'lm_head.weight'
def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : str ):
lowerCamelCase__ = torch.load(__lowerCAmelCase )
lowerCamelCase__ = d.pop(__lowerCAmelCase )
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
torch.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
if __name__ == "__main__":
UpperCamelCase : Any = argparse.ArgumentParser()
parser.add_argument('--dialogpt_path', default='.', type=str)
UpperCamelCase : Dict = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
UpperCamelCase : Any = os.path.join(args.dialogpt_path, F'{MODEL}_ft.pkl')
UpperCamelCase : str = F'./DialoGPT-{MODEL}'
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
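# The conversion above boils down to a single state-dict key rename; a toy
# sketch of the same dict surgery:
demo_state = {"lm_head.decoder.weight": "W", "transformer.h.0.attn": "A"}
demo_state["lm_head.weight"] = demo_state.pop("lm_head.decoder.weight")
print(sorted(demo_state))  # ['lm_head.weight', 'transformer.h.0.attn']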
| 9 | 1 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
__a : Any = logging.get_logger(__name__)
__a : Dict = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
__a : List[Any] = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
__a : Tuple = {
"allenai/led-base-16384": 16384,
}
class __lowercase ( lowercase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = LEDTokenizer
SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
def __init__( self : List[str] , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : str=None , UpperCamelCase_ : List[str]="replace" , UpperCamelCase_ : int="<s>" , UpperCamelCase_ : Dict="</s>" , UpperCamelCase_ : List[str]="</s>" , UpperCamelCase_ : Dict="<s>" , UpperCamelCase_ : Optional[int]="<unk>" , UpperCamelCase_ : List[Any]="<pad>" , UpperCamelCase_ : List[str]="<mask>" , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : List[Any]=True , **UpperCamelCase_ : List[Any] , ):
"""simple docstring"""
super().__init__(
UpperCamelCase_ , UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , errors=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ , **UpperCamelCase_ , )
__A = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , UpperCamelCase_ ) != add_prefix_space:
__A = getattr(UpperCamelCase_ , pre_tok_state.pop("""type""" ) )
__A = add_prefix_space
__A = pre_tok_class(**UpperCamelCase_ )
__A = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__A = """post_processor"""
__A = getattr(self.backend_tokenizer , UpperCamelCase_ , UpperCamelCase_ )
if tokenizer_component_instance:
__A = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
__A = tuple(state["""sep"""] )
if "cls" in state:
__A = tuple(state["""cls"""] )
__A = False
if state.get("""add_prefix_space""" , UpperCamelCase_ ) != add_prefix_space:
__A = add_prefix_space
__A = True
if state.get("""trim_offsets""" , UpperCamelCase_ ) != trim_offsets:
__A = trim_offsets
__A = True
if changes_to_apply:
__A = getattr(UpperCamelCase_ , state.pop("""type""" ) )
__A = component_class(**UpperCamelCase_ )
setattr(self.backend_tokenizer , UpperCamelCase_ , UpperCamelCase_ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCAmelCase_ ( self : Tuple , UpperCamelCase_ : int ):
"""simple docstring"""
__A = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else value
__A = value
def lowerCAmelCase_ ( self : Optional[Any] , *UpperCamelCase_ : int , **UpperCamelCase_ : Dict ):
"""simple docstring"""
__A = kwargs.get("""is_split_into_words""" , UpperCamelCase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase_ ( self : Dict , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : str ):
"""simple docstring"""
__A = kwargs.get("""is_split_into_words""" , UpperCamelCase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase_ ( self : Any , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ):
"""simple docstring"""
__A = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
def lowerCAmelCase_ ( self : int , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict=None ):
"""simple docstring"""
__A = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase_ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
"""simple docstring"""
__A = [self.sep_token_id]
__A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase_ ( self : Optional[int] , UpperCamelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[bool] = None , ):
"""simple docstring"""
__A = super()._pad(
encoded_inputs=UpperCamelCase_ , max_length=UpperCamelCase_ , padding_strategy=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , )
# Load from model defaults
if return_attention_mask is None:
__A = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__A = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
__A = len(encoded_inputs["""global_attention_mask"""] ) != len(UpperCamelCase_ )
if needs_to_be_padded:
__A = len(UpperCamelCase_ ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means "local attention" rather than "do not attend"
__A = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
__A = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
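# A minimal sketch of the global_attention_mask padding done by the _pad
# override above: the pad value is -1 because 0 already means "local
# attention" in LED rather than "do not attend".
demo_mask = [1, 0, 0]   # 1 = global attention on the first token
demo_difference = 2     # positions added by batch padding
print(demo_mask + [-1] * demo_difference)   # padding_side == "right"
print([-1] * demo_difference + demo_mask)   # padding_side == "left"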
| 637 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
__a : Union[str, Any] = logging.get_logger(__name__)
__a : Tuple = "Hello world! cécé herlolip"
def _SCREAMING_SNAKE_CASE ( __lowercase : str , __lowercase : str , __lowercase : bool ) -> List[Any]:
"""simple docstring"""
__A = FairseqRobertaModel.from_pretrained(__lowercase )
roberta.eval() # disable dropout
__A = roberta.model.encoder.sentence_encoder
__A = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
if classification_head:
__A = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our RoBERTa config:""" , __lowercase )
__A = XLMRobertaXLForSequenceClassification(__lowercase ) if classification_head else XLMRobertaXLForMaskedLM(__lowercase )
model.eval()
# Now let's copy all the weights.
# Embeddings
__A = roberta_sent_encoder.embed_tokens.weight
__A = roberta_sent_encoder.embed_positions.weight
__A = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
__A = roberta_sent_encoder.layer_norm.weight
__A = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__A = model.roberta.encoder.layer[i]
__A = roberta_sent_encoder.layers[i]
__A = layer.attention
__A = roberta_layer.self_attn_layer_norm.weight
__A = roberta_layer.self_attn_layer_norm.bias
# self attention
__A = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
__A = roberta_layer.self_attn.q_proj.weight
__A = roberta_layer.self_attn.q_proj.bias
__A = roberta_layer.self_attn.k_proj.weight
__A = roberta_layer.self_attn.k_proj.bias
__A = roberta_layer.self_attn.v_proj.weight
__A = roberta_layer.self_attn.v_proj.bias
# self-attention output
__A = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
__A = roberta_layer.self_attn.out_proj.weight
__A = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
__A = roberta_layer.final_layer_norm.weight
__A = roberta_layer.final_layer_norm.bias
# intermediate
__A = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
__A = roberta_layer.fca.weight
__A = roberta_layer.fca.bias
# output
__A = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
__A = roberta_layer.fca.weight
__A = roberta_layer.fca.bias
# end of layer
if classification_head:
__A = roberta.model.classification_heads["""mnli"""].dense.weight
__A = roberta.model.classification_heads["""mnli"""].dense.bias
__A = roberta.model.classification_heads["""mnli"""].out_proj.weight
__A = roberta.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
__A = roberta.model.encoder.lm_head.dense.weight
__A = roberta.model.encoder.lm_head.dense.bias
__A = roberta.model.encoder.lm_head.layer_norm.weight
__A = roberta.model.encoder.lm_head.layer_norm.bias
__A = roberta.model.encoder.lm_head.weight
__A = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
__A = roberta.encode(__lowercase ).unsqueeze(0 ) # batch of size 1
__A = model(__lowercase )[0]
if classification_head:
__A = roberta.model.classification_heads["""mnli"""](roberta.extract_features(__lowercase ) )
else:
__A = roberta.model(__lowercase )[0]
print(our_output.shape , their_output.shape )
__A = torch.max(torch.abs(our_output - their_output ) ).item()
print(f"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7
__A = torch.allclose(__lowercase , __lowercase , atol=1E-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
pathlib.Path(__lowercase ).mkdir(parents=__lowercase , exist_ok=__lowercase )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(__lowercase )
if __name__ == "__main__":
__a : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
__a : Dict = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
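# A toy sketch of the parity check this script ends with: outputs are
# compared element-wise under an absolute tolerance of 1e-3.
demo_ours = torch.tensor([1.0000, 2.0000])
demo_theirs = torch.tensor([1.0004, 1.9997])
print(torch.max(torch.abs(demo_ours - demo_theirs)).item())  # ~4e-4, like max_absolute_diff
print(torch.allclose(demo_ours, demo_theirs, atol=1E-3))     # True -> "🔥"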
| 637 | 1 |
'''simple docstring'''
from __future__ import annotations
_lowercase : List[str] =[
    [-1, 0], # up (toward row 0)
    [0, -1], # left
    [1, 0], # down
    [0, 1], # right
]
def __UpperCAmelCase ( UpperCamelCase__ :list[list[int]] , UpperCamelCase__ :list[int] , UpperCamelCase__ :list[int] , UpperCamelCase__ :int , UpperCamelCase__ :list[list[int]] , ) -> tuple[list[list[int]], list[list[int]]]:
snake_case__ : List[Any] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(UpperCamelCase__ ) )
] # the reference grid
snake_case__ : Tuple = 1
snake_case__ : Any = [
[0 for col in range(len(grid[0] ) )] for row in range(len(UpperCamelCase__ ) )
] # the action grid
snake_case__ : str = init[0]
snake_case__ : List[str] = init[1]
snake_case__ : Any = 0
    snake_case__ : Union[str, Any] = g + heuristic[x][y] # f = g + h: estimated total cost of a path through this cell
snake_case__ : Optional[int] = [[f, g, x, y]]
snake_case__ : Any = False # flag that is set when search is complete
    snake_case__ : Tuple = False # flag set when there is nothing left to expand
while not found and not resign:
if len(UpperCamelCase__ ) == 0:
raise ValueError('''Algorithm is unable to find solution''' )
        else: # expand the lowest-cost candidate so as to move closer to the goal
cell.sort()
cell.reverse()
snake_case__ : List[str] = cell.pop()
snake_case__ : Optional[Any] = next_cell[2]
snake_case__ : List[Any] = next_cell[3]
snake_case__ : Dict = next_cell[1]
if x == goal[0] and y == goal[1]:
snake_case__ : Optional[Any] = True
else:
for i in range(len(UpperCamelCase__ ) ): # to try out different valid actions
snake_case__ : str = x + DIRECTIONS[i][0]
snake_case__ : Optional[Any] = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(UpperCamelCase__ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
snake_case__ : Optional[int] = g + cost
snake_case__ : Union[str, Any] = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
snake_case__ : Tuple = 1
snake_case__ : int = i
snake_case__ : str = []
snake_case__ : str = goal[0]
snake_case__ : Union[str, Any] = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
snake_case__ : Optional[int] = x - DIRECTIONS[action[x][y]][0]
snake_case__ : Union[str, Any] = y - DIRECTIONS[action[x][y]][1]
snake_case__ : Union[str, Any] = xa
snake_case__ : List[str] = ya
invpath.append([x, y] )
snake_case__ : Optional[int] = []
for i in range(len(UpperCamelCase__ ) ):
path.append(invpath[len(UpperCamelCase__ ) - 1 - i] )
return path, action
if __name__ == "__main__":
_lowercase : List[str] =[
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
_lowercase : Optional[Any] =[0, 0]
# all coordinates are given in format [y,x]
_lowercase : Union[str, Any] =[len(grid) - 1, len(grid[0]) - 1]
_lowercase : int =1
# the cost map which pushes the path closer to the goal
_lowercase : Optional[int] =[[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
_lowercase : Any =abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
_lowercase : Optional[Any] =99
_lowercase , _lowercase : Optional[Any] =search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
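# A small sketch of how the heuristic grid above is built: plain Manhattan
# distance to the goal, with obstacle cells then penalised by 99.
demo_goal = (4, 5)
demo_heuristic = [
    [abs(i - demo_goal[0]) + abs(j - demo_goal[1]) for j in range(6)]
    for i in range(5)
]
for demo_row in demo_heuristic:
    print(demo_row)  # values shrink monotonically toward the goal cell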
| 574 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def __UpperCAmelCase ( UpperCamelCase__ :bytes ) -> bytes:
if len(UpperCamelCase__ ) != 32:
raise ValueError('''Input must be of length 32''' )
snake_case__ : Any = B''''''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def __UpperCAmelCase ( UpperCamelCase__ :int ) -> bytes:
if i < 0:
raise ValueError('''Input must be non-negative''' )
snake_case__ : Union[str, Any] = format(UpperCamelCase__ , '''08x''' )[-8:]
snake_case__ : Dict = B''''''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''' )
return little_endian_hex
def __UpperCAmelCase ( UpperCamelCase__ :bytes ) -> bytes:
snake_case__ : Optional[Any] = B''''''
for char in message:
bit_string += format(UpperCamelCase__ , '''08b''' ).encode('''utf-8''' )
snake_case__ : List[str] = format(len(UpperCamelCase__ ) , '''064b''' ).encode('''utf-8''' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(UpperCamelCase__ ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def __UpperCAmelCase ( UpperCamelCase__ :bytes ) -> Generator[list[int], None, None]:
if len(UpperCamelCase__ ) % 512 != 0:
raise ValueError('''Input must have length that\'s a multiple of 512''' )
for pos in range(0 , len(UpperCamelCase__ ) , 512 ):
snake_case__ : Union[str, Any] = bit_string[pos : pos + 512]
snake_case__ : Optional[int] = []
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def __UpperCAmelCase ( UpperCamelCase__ :int ) -> int:
if i < 0:
raise ValueError('''Input must be non-negative''' )
snake_case__ : Tuple = format(UpperCamelCase__ , '''032b''' )
snake_case__ : Any = ''''''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(UpperCamelCase__ , 2 )
def __UpperCAmelCase ( UpperCamelCase__ :int , UpperCamelCase__ :int ) -> int:
return (a + b) % 2**32
def __UpperCAmelCase ( UpperCamelCase__ :int , UpperCamelCase__ :int ) -> int:
if i < 0:
raise ValueError('''Input must be non-negative''' )
if shift < 0:
raise ValueError('''Shift must be non-negative''' )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def __UpperCAmelCase ( UpperCamelCase__ :bytes ) -> bytes:
snake_case__ : int = preprocess(UpperCamelCase__ )
snake_case__ : str = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
snake_case__ : List[str] = 0x67452301
snake_case__ : Any = 0xefcdab89
snake_case__ : List[Any] = 0x98badcfe
snake_case__ : int = 0x10325476
snake_case__ : Any = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(UpperCamelCase__ ):
snake_case__ : Dict = aa
snake_case__ : Tuple = ba
snake_case__ : Any = ca
snake_case__ : Any = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
snake_case__ : Dict = d ^ (b & (c ^ d))
snake_case__ : Optional[int] = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
snake_case__ : Optional[Any] = c ^ (d & (b ^ c))
snake_case__ : Tuple = (5 * i + 1) % 16
elif i <= 47:
snake_case__ : Union[str, Any] = b ^ c ^ d
snake_case__ : List[str] = (3 * i + 5) % 16
else:
snake_case__ : int = c ^ (b | not_aa(UpperCamelCase__ ))
snake_case__ : Optional[Any] = (7 * i) % 16
snake_case__ : List[Any] = (f + a + added_consts[i] + block_words[g]) % 2**32
snake_case__ : Optional[int] = d
snake_case__ : Dict = c
snake_case__ : Dict = b
snake_case__ : int = sum_aa(UpperCamelCase__ , left_rotate_aa(UpperCamelCase__ , shift_amounts[i] ) )
# Add hashed chunk to running total
snake_case__ : Union[str, Any] = sum_aa(UpperCamelCase__ , UpperCamelCase__ )
snake_case__ : Union[str, Any] = sum_aa(UpperCamelCase__ , UpperCamelCase__ )
snake_case__ : Union[str, Any] = sum_aa(UpperCamelCase__ , UpperCamelCase__ )
snake_case__ : List[Any] = sum_aa(UpperCamelCase__ , UpperCamelCase__ )
snake_case__ : Optional[int] = reformat_hex(UpperCamelCase__ ) + reformat_hex(UpperCamelCase__ ) + reformat_hex(UpperCamelCase__ ) + reformat_hex(UpperCamelCase__ )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
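# Reference digests the routine above should reproduce (standard MD5 test
# vectors, cross-checked here against the standard library):
import hashlib

print(hashlib.md5(b"").hexdigest())     # d41d8cd98f00b204e9800998ecf8427e
print(hashlib.md5(b"abc").hexdigest())  # 900150983cd24fb0d6963f7d28e17f72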
| 574 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class __lowercase (lowercase__ ):
_lowerCamelCase = """ibert"""
def __init__( self : Any , UpperCAmelCase_ : List[str]=30_522 , UpperCAmelCase_ : Optional[Any]=768 , UpperCAmelCase_ : str=12 , UpperCAmelCase_ : Optional[int]=12 , UpperCAmelCase_ : str=3_072 , UpperCAmelCase_ : Union[str, Any]="gelu" , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : Any=512 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : Any=1e-12 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : Union[str, Any]=0 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : List[str]="absolute" , UpperCAmelCase_ : int=False , UpperCAmelCase_ : Union[str, Any]="none" , **UpperCAmelCase_ : List[Any] , ):
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_)
UpperCamelCase__ : Optional[int] = vocab_size
UpperCamelCase__ : Union[str, Any] = hidden_size
UpperCamelCase__ : Union[str, Any] = num_hidden_layers
UpperCamelCase__ : Optional[int] = num_attention_heads
UpperCamelCase__ : int = hidden_act
UpperCamelCase__ : Optional[int] = intermediate_size
UpperCamelCase__ : Tuple = hidden_dropout_prob
UpperCamelCase__ : Dict = attention_probs_dropout_prob
UpperCamelCase__ : Dict = max_position_embeddings
UpperCamelCase__ : List[Any] = type_vocab_size
UpperCamelCase__ : Dict = initializer_range
UpperCamelCase__ : Optional[int] = layer_norm_eps
UpperCamelCase__ : Optional[Any] = position_embedding_type
UpperCamelCase__ : List[str] = quant_mode
UpperCamelCase__ : str = force_dequant
class __lowercase (lowercase__ ):
@property
def __UpperCamelCase ( self : List[Any]):
if self.task == "multiple-choice":
UpperCamelCase__ : Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCamelCase__ : Dict = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
])
| 596 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase ( self ):
lowercase_ :int = 1
lowercase_ :Optional[int] = 3
lowercase_ :Optional[int] = (32, 32)
lowercase_ :str = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase_ )
return image
@property
def UpperCamelCase ( self ):
torch.manual_seed(0 )
lowercase_ :Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=UpperCamelCase_ , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def UpperCamelCase ( self ):
torch.manual_seed(0 )
lowercase_ :Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def UpperCamelCase ( self ):
torch.manual_seed(0 )
lowercase_ :str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
return CLIPTextModel(UpperCamelCase_ )
def UpperCamelCase ( self ):
lowercase_ :str = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase_ :Optional[int] = self.dummy_cond_unet_upscale
lowercase_ :str = DDPMScheduler()
lowercase_ :Optional[int] = DDIMScheduler(prediction_type='''v_prediction''' )
lowercase_ :Any = self.dummy_vae
lowercase_ :Optional[Any] = self.dummy_text_encoder
lowercase_ :Any = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase_ :List[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase_ :Any = Image.fromarray(np.uinta(UpperCamelCase_ ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
lowercase_ :Optional[Any] = StableDiffusionUpscalePipeline(
unet=UpperCamelCase_ , low_res_scheduler=UpperCamelCase_ , scheduler=UpperCamelCase_ , vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , max_noise_level=350 , )
lowercase_ :Dict = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase_ :str = '''A painting of a squirrel eating a burger'''
lowercase_ :List[Any] = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
lowercase_ :List[Any] = sd_pipe(
[prompt] , image=UpperCamelCase_ , generator=UpperCamelCase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
lowercase_ :Tuple = output.images
lowercase_ :Dict = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
lowercase_ :Optional[int] = sd_pipe(
[prompt] , image=UpperCamelCase_ , generator=UpperCamelCase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , return_dict=UpperCamelCase_ , )[0]
lowercase_ :Dict = image[0, -3:, -3:, -1]
lowercase_ :Any = image_from_tuple[0, -3:, -3:, -1]
lowercase_ :str = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
lowercase_ :Tuple = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase ( self ):
lowercase_ :Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase_ :List[str] = self.dummy_cond_unet_upscale
lowercase_ :int = DDPMScheduler()
lowercase_ :Union[str, Any] = DDIMScheduler(prediction_type='''v_prediction''' )
lowercase_ :List[Any] = self.dummy_vae
lowercase_ :int = self.dummy_text_encoder
lowercase_ :Any = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase_ :Optional[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase_ :Any = Image.fromarray(np.uinta(UpperCamelCase_ ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
lowercase_ :List[str] = StableDiffusionUpscalePipeline(
unet=UpperCamelCase_ , low_res_scheduler=UpperCamelCase_ , scheduler=UpperCamelCase_ , vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , max_noise_level=350 , )
lowercase_ :Dict = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase_ :Optional[Any] = '''A painting of a squirrel eating a burger'''
lowercase_ :Any = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
lowercase_ :Dict = output.images
assert image.shape[0] == 2
lowercase_ :Union[str, Any] = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
lowercase_ :Dict = sd_pipe(
[prompt] , image=UpperCamelCase_ , generator=UpperCamelCase_ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
lowercase_ :Tuple = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCamelCase ( self ):
lowercase_ :int = self.dummy_cond_unet_upscale
lowercase_ :str = DDPMScheduler()
lowercase_ :List[str] = DDIMScheduler(prediction_type='''v_prediction''' )
lowercase_ :Optional[int] = self.dummy_vae
lowercase_ :List[Any] = self.dummy_text_encoder
lowercase_ :Any = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase_ :str = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase_ :Tuple = Image.fromarray(np.uinta(UpperCamelCase_ ) ).convert('''RGB''' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
lowercase_ :Any = unet.half()
lowercase_ :Tuple = text_encoder.half()
# make sure here that pndm scheduler skips prk
lowercase_ :str = StableDiffusionUpscalePipeline(
unet=UpperCamelCase_ , low_res_scheduler=UpperCamelCase_ , scheduler=UpperCamelCase_ , vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , max_noise_level=350 , )
lowercase_ :Any = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase_ :Dict = '''A painting of a squirrel eating a burger'''
lowercase_ :str = torch.manual_seed(0 )
lowercase_ :Union[str, Any] = sd_pipe(
[prompt] , image=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=2 , output_type='''np''' , ).images
lowercase_ :int = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self ):
lowercase_ :Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
lowercase_ :Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat.npy''' )
lowercase_ :Optional[int] = '''stabilityai/stable-diffusion-x4-upscaler'''
lowercase_ :int = StableDiffusionUpscalePipeline.from_pretrained(UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
lowercase_ :List[Any] = '''a cat sitting on a park bench'''
lowercase_ :int = torch.manual_seed(0 )
lowercase_ :Optional[Any] = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type='''np''' , )
lowercase_ :str = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def UpperCamelCase ( self ):
lowercase_ :Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
lowercase_ :Optional[int] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat_fp16.npy''' )
lowercase_ :List[Any] = '''stabilityai/stable-diffusion-x4-upscaler'''
lowercase_ :Tuple = StableDiffusionUpscalePipeline.from_pretrained(
UpperCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
lowercase_ :Dict = '''a cat sitting on a park bench'''
lowercase_ :Union[str, Any] = torch.manual_seed(0 )
lowercase_ :Dict = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type='''np''' , )
lowercase_ :Optional[int] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def UpperCamelCase ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase_ :int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
lowercase_ :Optional[int] = '''stabilityai/stable-diffusion-x4-upscaler'''
lowercase_ :Dict = StableDiffusionUpscalePipeline.from_pretrained(
UpperCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase_ :int = '''a cat sitting on a park bench'''
lowercase_ :int = torch.manual_seed(0 )
lowercase_ :Union[str, Any] = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=5 , output_type='''np''' , )
lowercase_ :str = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
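# A minimal sketch of the determinism these tests rely on: two torch
# Generators seeded identically produce identical noise tensors.
demo_g_a = torch.Generator().manual_seed(0)
demo_g_b = torch.Generator().manual_seed(0)
print(torch.equal(torch.randn(4, generator=demo_g_a), torch.randn(4, generator=demo_g_b)))  # True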
| 257 | 0 |
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ )-> int:
"""simple docstring"""
assert column_title.isupper()
snake_case_ : Optional[int] = 0
snake_case_ : List[Any] = len(__magic_name__ ) - 1
snake_case_ : List[str] = 0
while index >= 0:
snake_case_ : int = (ord(column_title[index] ) - 64) * pow(26 ,__magic_name__ )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
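# A worked instance of the base-26 conversion above: "AB" maps to
# (ord("B") - 64) * 26**0 + (ord("A") - 64) * 26**1 = 2 + 26 = 28.
print((ord("B") - 64) * 26**0 + (ord("A") - 64) * 26**1)  # 28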
| 656 |
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__lowerCamelCase : Any = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
__lowerCamelCase : List[str] = direct_transformers_import(PATH_TO_TRANSFORMERS)
__lowerCamelCase : Optional[Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__lowerCamelCase : Union[str, Any] = re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
__lowerCamelCase : Any = {
'''DecisionTransformerConfig''',
'''EncoderDecoderConfig''',
'''MusicgenConfig''',
'''RagConfig''',
'''SpeechEncoderDecoderConfig''',
'''TimmBackboneConfig''',
'''VisionEncoderDecoderConfig''',
'''VisionTextDualEncoderConfig''',
'''LlamaConfig''',
}
def __UpperCAmelCase ( __magic_name__ )-> List[Any]:
"""simple docstring"""
snake_case_ : Tuple = None
# source code of `config_class`
snake_case_ : List[Any] = inspect.getsource(__magic_name__ )
snake_case_ : List[str] = _re_checkpoint.findall(__magic_name__ )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith("/" ):
snake_case_ : Optional[Any] = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
snake_case_ : str = F'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
snake_case_ : Dict = ckpt_name
break
return checkpoint
def __UpperCAmelCase ( )-> Dict:
"""simple docstring"""
snake_case_ : Optional[int] = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
snake_case_ : str = get_checkpoint_from_config_class(__magic_name__ )
snake_case_ : Union[str, Any] = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(__magic_name__ )
if len(__magic_name__ ) > 0:
snake_case_ : Tuple = "\n".join(sorted(__magic_name__ ) )
raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
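# A quick sketch of what the checkpoint regex above extracts from a config
# docstring's markdown link:
demo_pat = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
print(demo_pat.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)"))
# -> [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]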
| 656 | 1 |
"""simple docstring"""
from collections import defaultdict
def snake_case_ ( A_ : str, A_ : str ):
'''simple docstring'''
_lowerCamelCase : List[str] = first_str.lower().strip()
_lowerCamelCase : List[str] = second_str.lower().strip()
# Remove whitespace
_lowerCamelCase : str = first_str.replace(''' ''', '''''' )
_lowerCamelCase : Dict = second_str.replace(''' ''', '''''' )
# Strings of different lengths are not anagrams
if len(A_ ) != len(A_ ):
return False
# Default values for count should be 0
_lowerCamelCase : defaultdict[str, int] = defaultdict(A_ )
    # For each character position, increment the count for the first
    # string's character and decrement it for the second's
for i in range(len(A_ ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
lowerCAmelCase__ = input('''Enter the first string ''').strip()
lowerCAmelCase__ = input('''Enter the second string ''').strip()
lowerCAmelCase__ = check_anagrams(input_a, input_b)
print(F"""{input_a} and {input_b} are {"" if status else "not "}anagrams.""")
| 83 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
_UpperCAmelCase : Union[str, Any] = (720, 1280) # Height, Width
_UpperCAmelCase : str = (0.4, 0.6)  # range for the random mosaic split point, as a fraction of the output size
_UpperCAmelCase : Optional[Any] = 1 / 100  # bounding boxes with height or width below this scale are dropped
_UpperCAmelCase : Optional[Any] = """"""
_UpperCAmelCase : int = """"""
_UpperCAmelCase : Union[str, Any] = """"""
_UpperCAmelCase : List[Any] = 250
def snake_case__ ( ) -> None:
_UpperCamelCase, _UpperCamelCase : List[Any] = get_dataset(UpperCamelCase ,UpperCamelCase )
for index in range(UpperCamelCase ):
_UpperCamelCase : List[str] = random.sample(range(len(UpperCamelCase ) ) ,4 )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[str] = update_image_and_anno(
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,filter_scale=UpperCamelCase ,)
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_UpperCamelCase : List[str] = random_chars(32 )
_UpperCamelCase : List[str] = path.split(os.sep )[-1].rsplit('''.''' ,1 )[0]
_UpperCamelCase : Any = f'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
cva.imwrite(f'''{file_root}.jpg''' ,UpperCamelCase ,[cva.IMWRITE_JPEG_QUALITY, 85] )
print(f'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
_UpperCamelCase : Any = []
for anno in new_annos:
_UpperCamelCase : List[Any] = anno[3] - anno[1]
_UpperCamelCase : int = anno[4] - anno[2]
_UpperCamelCase : int = anno[1] + width / 2
_UpperCamelCase : int = anno[2] + height / 2
_UpperCamelCase : Optional[Any] = f'''{anno[0]} {x_center} {y_center} {width} {height}'''
annos_list.append(UpperCamelCase )
with open(f'''{file_root}.txt''' ,'''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> tuple[list, list]:
_UpperCamelCase : List[str] = []
_UpperCamelCase : Union[str, Any] = []
for label_file in glob.glob(os.path.join(UpperCamelCase ,'''*.txt''' ) ):
_UpperCamelCase : int = label_file.split(os.sep )[-1].rsplit('''.''' ,1 )[0]
with open(UpperCamelCase ) as in_file:
_UpperCamelCase : Dict = in_file.readlines()
_UpperCamelCase : Tuple = os.path.join(UpperCamelCase ,f'''{label_name}.jpg''' )
_UpperCamelCase : Tuple = []
for obj_list in obj_lists:
_UpperCamelCase : List[Any] = obj_list.rstrip('''\n''' ).split(''' ''' )
_UpperCamelCase : Tuple = float(obj[1] ) - float(obj[3] ) / 2
_UpperCamelCase : Any = float(obj[2] ) - float(obj[4] ) / 2
_UpperCamelCase : Tuple = float(obj[1] ) + float(obj[3] ) / 2
_UpperCamelCase : List[Any] = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(UpperCamelCase )
labels.append(UpperCamelCase )
return img_paths, labels
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = 0.0 ,) -> tuple[list, list, str]:
_UpperCamelCase : Optional[int] = np.zeros([output_size[0], output_size[1], 3] ,dtype=np.uinta )
_UpperCamelCase : str = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_UpperCamelCase : Dict = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_UpperCamelCase : Dict = int(scale_x * output_size[1] )
_UpperCamelCase : Dict = int(scale_y * output_size[0] )
_UpperCamelCase : int = []
_UpperCamelCase : Union[str, Any] = []
for i, index in enumerate(UpperCamelCase ):
_UpperCamelCase : Optional[int] = all_img_list[index]
path_list.append(UpperCamelCase )
_UpperCamelCase : str = all_annos[index]
_UpperCamelCase : Tuple = cva.imread(UpperCamelCase )
if i == 0: # top-left
_UpperCamelCase : Any = cva.resize(UpperCamelCase ,(divid_point_x, divid_point_y) )
_UpperCamelCase : Any = img
for bbox in img_annos:
_UpperCamelCase : List[Any] = bbox[1] * scale_x
_UpperCamelCase : Dict = bbox[2] * scale_y
_UpperCamelCase : Any = bbox[3] * scale_x
_UpperCamelCase : Any = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
_UpperCamelCase : Union[str, Any] = cva.resize(UpperCamelCase ,(output_size[1] - divid_point_x, divid_point_y) )
_UpperCamelCase : List[Any] = img
for bbox in img_annos:
_UpperCamelCase : Any = scale_x + bbox[1] * (1 - scale_x)
_UpperCamelCase : Optional[Any] = bbox[2] * scale_y
_UpperCamelCase : Any = scale_x + bbox[3] * (1 - scale_x)
_UpperCamelCase : Optional[int] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
_UpperCamelCase : Dict = cva.resize(UpperCamelCase ,(divid_point_x, output_size[0] - divid_point_y) )
_UpperCamelCase : List[str] = img
for bbox in img_annos:
_UpperCamelCase : int = bbox[1] * scale_x
_UpperCamelCase : Optional[Any] = scale_y + bbox[2] * (1 - scale_y)
_UpperCamelCase : int = bbox[3] * scale_x
_UpperCamelCase : Any = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
_UpperCamelCase : Dict = cva.resize(
UpperCamelCase ,(output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
_UpperCamelCase : Union[str, Any] = img
for bbox in img_annos:
_UpperCamelCase : Optional[int] = scale_x + bbox[1] * (1 - scale_x)
_UpperCamelCase : Union[str, Any] = scale_y + bbox[2] * (1 - scale_y)
_UpperCamelCase : Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
_UpperCamelCase : List[str] = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
_UpperCamelCase : Optional[Any] = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def snake_case__ ( UpperCamelCase ) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
_UpperCamelCase : Tuple = ascii_lowercase + digits
return "".join(random.choice(UpperCamelCase ) for _ in range(UpperCamelCase ) )
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 683 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
"configuration_trajectory_transformer": [
"TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TrajectoryTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrajectoryTransformerModel",
"TrajectoryTransformerPreTrainedModel",
"load_tf_weights_in_trajectory_transformer",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 705 |
def A ( _lowerCamelCase = 1_000_000 ):
'''simple docstring'''
_lowerCAmelCase : Any = 1
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : List[str] = {1: 1}
for inputa in range(2 , _lowerCamelCase ):
_lowerCAmelCase : int = 0
_lowerCAmelCase : Any = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
_lowerCAmelCase : Any = (3 * number) + 1
counter += 1
if inputa not in counters:
_lowerCAmelCase : Tuple = counter
if counter > pre_counter:
_lowerCAmelCase : Union[str, Any] = inputa
_lowerCAmelCase : Union[str, Any] = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
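# A worked instance of the chain counting above: the classic chain for 13 is
# 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1, i.e. 10 terms.
demo_n, demo_terms = 13, 1
while demo_n != 1:
    demo_n = demo_n // 2 if demo_n % 2 == 0 else 3 * demo_n + 1
    demo_terms += 1
print(demo_terms)  # 10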
| 658 | 0 |
'''simple docstring'''
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def A__ ( __lowerCAmelCase : Tuple=32 , __lowerCAmelCase : int=10 , __lowerCAmelCase : Optional[Any]=100 , __lowerCAmelCase : Union[str, Any]=1026 , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : List[Any]="data/tokenized_stories_train_wikitext103.jbl" , __lowerCAmelCase : Optional[int]="igf_context_pairs.jbl" , ):
set_seed(3 )
# generate train_data and objective_set
lowerCamelCase__ , lowerCamelCase__ = generate_datasets(
__lowerCAmelCase , __lowerCAmelCase , number=__lowerCAmelCase , min_len=1026 , trim=__lowerCAmelCase )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
lowerCamelCase__ = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# load pretrained model
lowerCamelCase__ = load_gpta("""gpt2""" ).to(__lowerCAmelCase )
print("""computing perplexity on objective set""" )
lowerCamelCase__ = compute_perplexity(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).item()
print("""perplexity on objective set:""" , __lowerCAmelCase )
# collect igf pairs and save to file demo.jbl
collect_objective_set(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Any=15 , __lowerCAmelCase : List[str]=128 , __lowerCAmelCase : Any=100 , __lowerCAmelCase : Optional[int]="igf_model.pt" , ):
set_seed(42 )
# Load pre-trained model
lowerCamelCase__ = GPTaLMHeadModel.from_pretrained("""gpt2""" )
# Initialize secondary learner to use embedding weights of model
lowerCamelCase__ = SecondaryLearner(__lowerCAmelCase )
# Train secondary learner
lowerCamelCase__ = train_secondary_learner(
__lowerCAmelCase , __lowerCAmelCase , max_epochs=__lowerCAmelCase , batch_size=__lowerCAmelCase , eval_freq=100 , igf_model_path=__lowerCAmelCase , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def A__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str=32 , __lowerCAmelCase : Tuple=1000 , __lowerCAmelCase : str=16 , __lowerCAmelCase : Optional[int]=1.0 , __lowerCAmelCase : List[str]=recopy_gpta , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : str=10 , __lowerCAmelCase : int="gpt2_finetuned.pt" , ):
lowerCamelCase__ = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
lowerCamelCase__ = RandomSampler(__lowerCAmelCase )
lowerCamelCase__ = DataLoader(__lowerCAmelCase , sampler=__lowerCAmelCase )
lowerCamelCase__ = max_steps // (len(__lowerCAmelCase )) + 1
lowerCamelCase__ = 0
lowerCamelCase__ = torch.zeros((1, context_len) , dtype=torch.long , device=__lowerCAmelCase )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = recopy_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
model.train()
if secondary_learner is not None:
secondary_learner.to(__lowerCAmelCase )
secondary_learner.eval()
lowerCamelCase__ = []
lowerCamelCase__ = 0
lowerCamelCase__ = []
lowerCamelCase__ = []
# Compute the performance of the transformer model at the beginning
lowerCamelCase__ = compute_perplexity(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
test_perps.append(__lowerCAmelCase )
print("""Test perplexity, step""" , __lowerCAmelCase , """:""" , __lowerCAmelCase )
for epoch in range(int(__lowerCAmelCase ) ):
for step, example in enumerate(__lowerCAmelCase ):
torch.cuda.empty_cache()
lowerCamelCase__ = random.randint(0 , example.size(2 ) - context_len - 1 )
lowerCamelCase__ = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
lowerCamelCase__ = model(__lowerCAmelCase , labels=__lowerCAmelCase )
lowerCamelCase__ = True
if secondary_learner is not None:
lowerCamelCase__ = secondary_learner.forward(
torch.tensor(__lowerCAmelCase , dtype=torch.long , device=__lowerCAmelCase ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(__lowerCAmelCase ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
lowerCamelCase__ = -1
if predicted_q < threshold:
lowerCamelCase__ = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
lowerCamelCase__ = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
lowerCamelCase__ = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
lowerCamelCase__ = compute_perplexity(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
test_perps.append(__lowerCAmelCase )
print("""Test perplexity, step""" , __lowerCAmelCase , """:""" , __lowerCAmelCase )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , __lowerCAmelCase )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def A__ ( ):
lowerCamelCase__ = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" )
# Required parameters
parser.add_argument(
"""--data_dir""" , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help="""The input data dir. Should contain data files for WikiText.""" , )
parser.add_argument(
"""--model_name_or_path""" , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--data_file""" , type=__lowerCAmelCase , default=__lowerCAmelCase , help=(
"""A jbl file containing tokenized data which can be split as objective dataset, """
"""train_dataset and test_dataset."""
) , )
parser.add_argument(
"""--igf_data_file""" , type=__lowerCAmelCase , default=__lowerCAmelCase , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , )
parser.add_argument(
"""--output_dir""" , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help="""The output directory where the final fine-tuned model is stored.""" , )
parser.add_argument(
"""--tokenizer_name""" , default=__lowerCAmelCase , type=__lowerCAmelCase , help="""Pretrained tokenizer name or path if not the same as model_name""" , )
parser.add_argument("""--seed""" , type=__lowerCAmelCase , default=__lowerCAmelCase , help="""A seed for reproducible training.""" )
parser.add_argument(
"""--context_len""" , default=32 , type=__lowerCAmelCase , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--size_objective_set""" , default=100 , type=__lowerCAmelCase , help="""number of articles that are long enough to be used as our objective set""" , )
parser.add_argument(
"""--eval_freq""" , default=100 , type=__lowerCAmelCase , help="""secondary model evaluation is triggered at eval_freq""" )
parser.add_argument("""--max_steps""" , default=1000 , type=__lowerCAmelCase , help="""To calculate training epochs""" )
parser.add_argument(
"""--secondary_learner_batch_size""" , default=128 , type=__lowerCAmelCase , help="""batch size of training data for secondary learner""" , )
parser.add_argument(
"""--batch_size""" , default=16 , type=__lowerCAmelCase , help="""batch size of training data of language model(gpt2) """ )
parser.add_argument(
"""--eval_interval""" , default=10 , type=__lowerCAmelCase , help=(
"""decay the selectivity of our secondary learner filter from"""
"""1 standard deviation above average to 1 below average after 10 batches"""
) , )
parser.add_argument(
"""--number""" , default=100 , type=__lowerCAmelCase , help="""The number of examples split to be used as objective_set/test_data""" )
parser.add_argument(
"""--min_len""" , default=1026 , type=__lowerCAmelCase , help="""The minimum length of the article to be used as objective set""" )
parser.add_argument(
"""--secondary_learner_max_epochs""" , default=15 , type=__lowerCAmelCase , help="""number of epochs to train secondary learner""" )
parser.add_argument("""--trim""" , default=__lowerCAmelCase , type=__lowerCAmelCase , help="""truncate the example if it exceeds context length""" )
parser.add_argument(
"""--threshold""" , default=1.0 , type=__lowerCAmelCase , help=(
"""The threshold value used by secondary learner to filter the train_data and allow only"""
""" informative data as input to the model"""
) , )
parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=__lowerCAmelCase , help="""finetuned_model_name""" )
parser.add_argument(
"""--recopy_model""" , default=__lowerCAmelCase , type=__lowerCAmelCase , help="""Reset the model to the original pretrained GPT-2 weights after each iteration""" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=__lowerCAmelCase , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , )
# Load train data for secondary learner
lowerCamelCase__ = joblib.load("""data/IGF_values.jbl""" )
# Train secondary learner
lowerCamelCase__ = training_secondary_learner(
__lowerCAmelCase , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="""igf_model.pt""" , )
# load pretrained gpt2 model
lowerCamelCase__ = GPTaLMHeadModel.from_pretrained("""gpt2""" )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
lowerCamelCase__ , lowerCamelCase__ = generate_datasets(
context_len=32 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=100 , min_len=1026 , trim=__lowerCAmelCase )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=__lowerCAmelCase , secondary_learner=__lowerCAmelCase , eval_interval=10 , finetuned_model_name="""gpt2_finetuned.pt""" , )
if __name__ == "__main__":
main()
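# Aside (not part of the original script): the comments in the training loop above describe
# decaying the secondary learner's selectivity from one standard deviation above the average
# predicted IG(X) to one below it over the first 10 batches, while the code simply drops the
# threshold to -1 at global_step == 10. A minimal sketch of the described linear schedule;
# `step`, `mean_q`, and `std_q` are illustrative names, not variables from the script.
def decayed_threshold(step: int , mean_q: float , std_q: float , decay_steps: int = 10 ) -> float:
    # Interpolate the z-score linearly from +1 down to -1, then hold it at -1.
    z = 1.0 - 2.0 * min(step , decay_steps ) / decay_steps
    return mean_q + z * std_q


assert decayed_threshold(0 , mean_q=0.0 , std_q=1.0 ) == 1.0
assert decayed_threshold(10 , mean_q=0.0 , std_q=1.0 ) == -1.0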
| 50 |
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str , map_location: str = "cpu" , save_path: Union[str, None] = None ):
    state_dict = torch.load(src_path , map_location=map_location )
    for k, v in tqdm(state_dict.items() ):
        if not isinstance(v , torch.Tensor ):
            raise TypeError("""FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin""" )
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict , save_path )
if __name__ == "__main__":
fire.Fire(convert)
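# Usage sketch (the script and file names are placeholders, assuming the fire-based CLI above):
#     python fp16_convert.py --src_path pytorch_model.bin --save_path pytorch_model.fp16.bin
# Halving every tensor roughly halves the checkpoint on disk, and the result loads back with a
# plain torch.load, since the dtype is stored per tensor:
#     assert all(v.dtype == torch.float16 for v in torch.load("pytorch_model.fp16.bin").values())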
| 50 | 1 |
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> int:
lowercase__ : Any = OmegaConf.load(__lowerCamelCase )
lowercase__ : Tuple = torch.load(__lowerCamelCase , map_location='''cpu''' )['''model''']
lowercase__ : List[str] = list(state_dict.keys() )
# extract state_dict for VQVAE
lowercase__ : Tuple = {}
lowercase__ : Union[str, Any] = '''first_stage_model.'''
for key in keys:
if key.startswith(__lowerCamelCase ):
lowercase__ : str = state_dict[key]
# extract state_dict for UNetLDM
lowercase__ : Any = {}
lowercase__ : List[Any] = '''model.diffusion_model.'''
for key in keys:
if key.startswith(__lowerCamelCase ):
lowercase__ : Union[str, Any] = state_dict[key]
lowercase__ : Union[str, Any] = config.model.params.first_stage_config.params
lowercase__ : Dict = config.model.params.unet_config.params
lowercase__ : str = VQModel(**__lowerCamelCase ).eval()
vqvae.load_state_dict(__lowerCamelCase )
lowercase__ : List[str] = UNetLDMModel(**__lowerCamelCase ).eval()
unet.load_state_dict(__lowerCamelCase )
lowercase__ : Optional[Any] = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule='''scaled_linear''' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=__lowerCamelCase , )
lowercase__ : Dict = LDMPipeline(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
pipeline.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', type=str, required=True)
parser.add_argument('--config_path', type=str, required=True)
parser.add_argument('--output_path', type=str, required=True)
lowerCAmelCase_ = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
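# Invocation sketch (script and file names are placeholders, not taken from this file):
#     python convert_ldm_checkpoint.py --checkpoint_path model.ckpt \
#         --config_path config.yaml --output_path ./ldm-pipeline
# The saved folder is an ordinary diffusers pipeline and should reload with
#     LDMPipeline.from_pretrained("./ldm-pipeline")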
| 122 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Dict = KandinskyInpaintPipeline
lowerCAmelCase : int = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
lowerCAmelCase : Any = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
lowerCAmelCase : Optional[Any] = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
lowerCAmelCase : int = False
@property
def UpperCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
return 32
@property
def UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
return 32
@property
def UpperCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
return self.time_input_dim
@property
def UpperCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
return self.time_input_dim * 4
@property
def UpperCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
return 100
@property
def UpperCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
lowercase__ : Tuple = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def UpperCAmelCase ( self : int ) -> Any:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ : str = MCLIPConfig(
numDims=self.cross_attention_dim ,transformerDimensions=self.text_embedder_hidden_size ,hidden_size=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=1_005 ,)
lowercase__ : List[Any] = MultilingualCLIP(_snake_case )
lowercase__ : Any = text_encoder.eval()
return text_encoder
@property
def UpperCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ : int = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
lowercase__ : Tuple = UNetaDConditionModel(**_snake_case )
return model
@property
def UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
lowercase__ : List[Any] = self.dummy_text_encoder
lowercase__ : Tuple = self.dummy_tokenizer
lowercase__ : List[Any] = self.dummy_unet
lowercase__ : Any = self.dummy_movq
lowercase__ : List[Any] = DDIMScheduler(
num_train_timesteps=1_000 ,beta_schedule='''linear''' ,beta_start=0.0_0085 ,beta_end=0.012 ,clip_sample=_snake_case ,set_alpha_to_one=_snake_case ,steps_offset=1 ,prediction_type='''epsilon''' ,thresholding=_snake_case ,)
lowercase__ : Dict = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Dict ,_snake_case : int=0 ) -> str:
"""simple docstring"""
lowercase__ : Tuple = floats_tensor((1, self.cross_attention_dim) ,rng=random.Random(_snake_case ) ).to(_snake_case )
lowercase__ : Optional[int] = floats_tensor((1, self.cross_attention_dim) ,rng=random.Random(seed + 1 ) ).to(_snake_case )
# create init_image
lowercase__ : int = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_snake_case ) ).to(_snake_case )
lowercase__ : Union[str, Any] = image.cpu().permute(0 ,2 ,3 ,1 )[0]
lowercase__ : List[Any] = Image.fromarray(np.uinta(_snake_case ) ).convert('''RGB''' ).resize((256, 256) )
# create mask
lowercase__ : Any = np.ones((64, 64) ,dtype=np.floataa )
lowercase__ : List[Any] = 0
if str(_snake_case ).startswith('''mps''' ):
lowercase__ : str = torch.manual_seed(_snake_case )
else:
lowercase__ : List[Any] = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
lowercase__ : Union[str, Any] = {
'''prompt''': '''horse''',
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = '''cpu'''
lowercase__ : List[str] = self.get_dummy_components()
lowercase__ : Optional[int] = self.pipeline_class(**_snake_case )
lowercase__ : Dict = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowercase__ : List[str] = pipe(**self.get_dummy_inputs(_snake_case ) )
lowercase__ : Optional[int] = output.images
lowercase__ : Union[str, Any] = pipe(
**self.get_dummy_inputs(_snake_case ) ,return_dict=_snake_case ,)[0]
lowercase__ : List[str] = image[0, -3:, -3:, -1]
lowercase__ : Any = image_from_tuple[0, -3:, -3:, -1]
print(f"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
lowercase__ : int = np.array(
[0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def UpperCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : List[str] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''' )
lowercase__ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
lowercase__ : int = np.ones((768, 768) ,dtype=np.floataa )
lowercase__ : Tuple = 0
lowercase__ : Union[str, Any] = '''a hat'''
lowercase__ : List[Any] = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' ,torch_dtype=torch.floataa )
pipe_prior.to(_snake_case )
lowercase__ : Optional[Any] = KandinskyInpaintPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-inpaint''' ,torch_dtype=torch.floataa )
lowercase__ : Any = pipeline.to(_snake_case )
pipeline.set_progress_bar_config(disable=_snake_case )
lowercase__ : int = torch.Generator(device='''cpu''' ).manual_seed(0 )
        lowercase__ , lowercase__ = pipe_prior(
_snake_case ,generator=_snake_case ,num_inference_steps=5 ,negative_prompt='''''' ,).to_tuple()
lowercase__ : Dict = pipeline(
_snake_case ,image=_snake_case ,mask_image=_snake_case ,image_embeds=_snake_case ,negative_image_embeds=_snake_case ,generator=_snake_case ,num_inference_steps=100 ,height=768 ,width=768 ,output_type='''np''' ,)
lowercase__ : str = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_snake_case ,_snake_case )
| 122 | 1 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__A = False
class _A ( unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class _A ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Any ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Optional[Any] ) -> str:
__UpperCAmelCase =VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
__UpperCAmelCase =torch.manual_seed(0 )
__UpperCAmelCase =pipe.dual_guided(
prompt="""first prompt""" , image=__SCREAMING_SNAKE_CASE , text_to_image_strength=0.75 , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =VersatileDiffusionPipeline.from_pretrained(__SCREAMING_SNAKE_CASE , torch_dtype=torch.floataa )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =generator.manual_seed(0 )
__UpperCAmelCase =pipe.dual_guided(
prompt="""first prompt""" , image=__SCREAMING_SNAKE_CASE , text_to_image_strength=0.75 , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def _a ( self : List[Any] ) -> Dict:
__UpperCAmelCase =VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase ="""cyberpunk 2077"""
__UpperCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
__UpperCAmelCase =torch.manual_seed(0 )
__UpperCAmelCase =pipe.dual_guided(
prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , text_to_image_strength=0.75 , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
__UpperCAmelCase =image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__UpperCAmelCase =np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
__UpperCAmelCase ="""A painting of a squirrel eating a burger """
__UpperCAmelCase =torch.manual_seed(0 )
__UpperCAmelCase =pipe.text_to_image(
prompt=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
__UpperCAmelCase =image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__UpperCAmelCase =np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
__UpperCAmelCase =pipe.image_variation(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , output_type="""numpy""" ).images
__UpperCAmelCase =image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__UpperCAmelCase =np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 68 |
import argparse
import os
import re
_lowercase : List[str] ="""src/diffusers"""
# Pattern that looks at the indentation in a line.
_lowercase : str =re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
_lowercase : Dict =re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_lowercase : Union[str, Any] =re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
_lowercase : Dict =re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_lowercase : Tuple =re.compile(r"""\[([^\]]+)\]""")
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
lowerCamelCase_ : int = _re_indent.search(lowerCAmelCase__ )
return "" if search is None else search.groups()[0]
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ,lowerCAmelCase__="" ,lowerCAmelCase__=None ,lowerCAmelCase__=None ):
lowerCamelCase_ : Optional[Any] = 0
lowerCamelCase_ : Union[str, Any] = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(lowerCAmelCase__ ):
index += 1
lowerCamelCase_ : Any = ['\n'.join(lines[:index] )]
else:
lowerCamelCase_ : str = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowerCamelCase_ : List[str] = [lines[index]]
index += 1
while index < len(lowerCAmelCase__ ) and (end_prompt is None or not lines[index].startswith(lowerCAmelCase__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowerCAmelCase__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(lowerCAmelCase__ ) )
if index < len(lowerCAmelCase__ ) - 1:
lowerCamelCase_ : int = [lines[index + 1]]
index += 1
else:
lowerCamelCase_ : List[str] = []
else:
blocks.append('\n'.join(lowerCAmelCase__ ) )
lowerCamelCase_ : str = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowerCAmelCase__ ) > 0:
blocks.append('\n'.join(lowerCAmelCase__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowerCAmelCase__ ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
def _inner(lowerCAmelCase__ ):
return key(lowerCAmelCase__ ).lower().replace('_' ,'' )
return _inner
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ,lowerCAmelCase__=None ):
# If no key is provided, we use a noop.
def noop(lowerCAmelCase__ ):
        return lowerCAmelCase__
if key is None:
lowerCamelCase_ : int = noop
# Constants are all uppercase, they go first.
    lowerCamelCase_ : Any = [obj for obj in objects if key(obj ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    lowerCamelCase_ : Dict = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
    # Functions begin with a lowercase, they go last.
    lowerCamelCase_ : Any = [obj for obj in objects if not key(obj )[0].isupper()]
lowerCamelCase_ : Optional[Any] = ignore_underscore(lowerCAmelCase__ )
return sorted(lowerCAmelCase__ ,key=lowerCAmelCase__ ) + sorted(lowerCAmelCase__ ,key=lowerCAmelCase__ ) + sorted(lowerCAmelCase__ ,key=lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
# This inner function sort imports between [ ].
def _replace(lowerCAmelCase__ ):
lowerCamelCase_ : Dict = match.groups()[0]
if "," not in imports:
return F"[{imports}]"
lowerCamelCase_ : Optional[int] = [part.strip().replace('"' ,'' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCamelCase_ : str = keys[:-1]
return "[" + ", ".join([F"\"{k}\"" for k in sort_objects(lowerCAmelCase__ )] ) + "]"
lowerCamelCase_ : Tuple = import_statement.split('\n' )
if len(lowerCAmelCase__ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowerCamelCase_ : int = 2 if lines[1].strip() == '[' else 1
        lowerCamelCase_ : Any = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        lowerCamelCase_ : str = sort_objects(lowerCAmelCase__ ,key=lambda x : x[1] )
lowerCamelCase_ : Any = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowerCAmelCase__ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowerCamelCase_ : Optional[int] = _re_bracket_content.sub(_replace ,lines[1] )
else:
lowerCamelCase_ : Any = [part.strip().replace('"' ,'' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCamelCase_ : List[Any] = keys[:-1]
lowerCamelCase_ : Optional[Any] = get_indent(lines[1] ) + ', '.join([F"\"{k}\"" for k in sort_objects(lowerCAmelCase__ )] )
return "\n".join(lowerCAmelCase__ )
else:
# Finally we have to deal with imports fitting on one line
lowerCamelCase_ : Any = _re_bracket_content.sub(_replace ,lowerCAmelCase__ )
return import_statement
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ,lowerCAmelCase__=True ):
with open(lowerCAmelCase__ ,'r' ) as f:
lowerCamelCase_ : Any = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowerCamelCase_ : int = split_code_in_indented_blocks(
lowerCAmelCase__ ,start_prompt='_import_structure = {' ,end_prompt='if TYPE_CHECKING:' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 ,len(lowerCAmelCase__ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowerCamelCase_ : Any = main_blocks[block_idx]
lowerCamelCase_ : Tuple = block.split('\n' )
# Get to the start of the imports.
lowerCamelCase_ : Optional[int] = 0
while line_idx < len(lowerCAmelCase__ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowerCamelCase_ : List[Any] = len(lowerCAmelCase__ )
else:
line_idx += 1
if line_idx >= len(lowerCAmelCase__ ):
continue
# Ignore beginning and last line: they don't contain anything.
lowerCamelCase_ : Tuple = '\n'.join(block_lines[line_idx:-1] )
lowerCamelCase_ : Dict = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
lowerCamelCase_ : Dict = split_code_in_indented_blocks(lowerCAmelCase__ ,indent_level=lowerCAmelCase__ )
# We have two categories of import key: list or _import_structure[key].append/extend
lowerCamelCase_ : List[str] = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
        lowerCamelCase_ : Tuple = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowerCamelCase_ : Any = [(i, key) for i, key in enumerate(lowerCAmelCase__ ) if key is not None]
        lowerCamelCase_ : Optional[Any] = [x[0] for x in sorted(lowerCAmelCase__ ,key=lambda x : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowerCamelCase_ : int = 0
lowerCamelCase_ : Dict = []
for i in range(len(lowerCAmelCase__ ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
lowerCamelCase_ : Tuple = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(lowerCAmelCase__ )
count += 1
# And we put our main block back together with its first and last line.
lowerCamelCase_ : Tuple = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(lowerCAmelCase__ ):
if check_only:
return True
else:
print(F"Overwriting {file}." )
with open(lowerCAmelCase__ ,'w' ) as f:
f.write('\n'.join(lowerCAmelCase__ ) )
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__=True ):
    failures = []
    for root, _, files in os.walk("src/diffusers" ):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root ,'__init__.py' ) ,check_only=lowerCAmelCase__ )
            if result:
                failures = [os.path.join(root ,'__init__.py' )]
    if len(failures ) > 0:
        raise ValueError(F"Would overwrite {len(failures )} files, run `make style`." )
if __name__ == "__main__":
_lowercase : int =argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
_lowercase : Union[str, Any] =parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
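# Illustration of the ordering rule implemented by the sort_objects helper above (names are
# made up): constants (ALL_CAPS) come first, then classes (CapWords), then functions
# (lowercase), each group sorted case-insensitively with underscores ignored, e.g.
#     ["my_func", "MyClass", "MY_CONST"] -> ["MY_CONST", "MyClass", "my_func"]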
| 364 | 0 |
def binomial_coefficient(n: int , r: int ) -> int:
    '''simple docstring'''
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
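# Cross-check (an aside; assumes Python >= 3.8 for math.comb): the Pascal-row update above
# agrees with the closed form C(n, r) = n! / (r! * (n - r)!).
import math

assert binomial_coefficient(10 , 5 ) == math.comb(10 , 5 ) == 252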
| 86 |
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int ) -> int:
    '''simple docstring'''
    if number < 0:
        raise ValueError("""the value of input must not be negative""" )
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int ) -> int:
    '''simple docstring'''
    if number < 0:
        raise ValueError("""the value of input must not be negative""" )
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    '''simple docstring'''
    def do_benchmark(number: int ) -> None:
        setup = """import __main__ as z"""
        print(f"Benchmark when {number = }:" )
        print(f"{get_set_bits_count_using_modulo_operator(number) = }" )
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})" , setup=setup )
        print(f"timeit() runs in {timing} seconds" )
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }" )
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})" , setup=setup , )
        print(f"timeit() runs in {timing} seconds" )

    for number in (25, 37, 58, 0):
        do_benchmark(number )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
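# Aside (built-in alternatives, not part of the benchmark above): CPython exposes the same
# count as int.bit_count() on Python >= 3.10, and bin(n).count("1") works on any version.
assert bin(25 ).count("""1""" ) == 3  # 25 == 0b11001, matching both functions above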
| 86 | 1 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
__lowercase = logging.get_logger(__name__)
__lowercase = {
    """tensor(bool)""": np.bool_,
    """tensor(int8)""": np.int8,
    """tensor(uint8)""": np.uint8,
    """tensor(int16)""": np.int16,
    """tensor(uint16)""": np.uint16,
    """tensor(int32)""": np.int32,
    """tensor(uint32)""": np.uint32,
    """tensor(int64)""": np.int64,
    """tensor(uint64)""": np.uint64,
    """tensor(float16)""": np.float16,
    """tensor(float)""": np.float32,
    """tensor(double)""": np.float64,
}
class _lowercase :
def __init__( self : str , lowerCamelCase__ : Union[str, Any]=None , **lowerCamelCase__ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
logger.info('''`diffusers.OnnxRuntimeModel` is experimental and might change in the future.''' )
A_ = model
A_ = kwargs.get('''model_save_dir''' , lowerCamelCase__ )
A_ = kwargs.get('''latest_model_name''' , lowerCamelCase__ )
def __call__( self : List[str] , **lowerCamelCase__ : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
A_ = {k: np.array(lowerCamelCase__ ) for k, v in kwargs.items()}
return self.model.run(lowerCamelCase__ , lowerCamelCase__ )
@staticmethod
def UpperCamelCase ( lowerCamelCase__ : Union[str, Path] , lowerCamelCase__ : Any=None , lowerCamelCase__ : Optional[int]=None ) -> int:
"""simple docstring"""
if provider is None:
logger.info('''No onnxruntime provider specified, using CPUExecutionProvider''' )
A_ = '''CPUExecutionProvider'''
return ort.InferenceSession(lowerCamelCase__ , providers=[provider] , sess_options=lowerCamelCase__ )
def UpperCamelCase ( self : Any , lowerCamelCase__ : Union[str, Path] , lowerCamelCase__ : Optional[str] = None , **lowerCamelCase__ : int ) -> Optional[Any]:
"""simple docstring"""
A_ = file_name if file_name is not None else ONNX_WEIGHTS_NAME
A_ = self.model_save_dir.joinpath(self.latest_model_name )
A_ = Path(lowerCamelCase__ ).joinpath(lowerCamelCase__ )
try:
shutil.copyfile(lowerCamelCase__ , lowerCamelCase__ )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
A_ = self.model_save_dir.joinpath(lowerCamelCase__ )
if src_path.exists():
A_ = Path(lowerCamelCase__ ).joinpath(lowerCamelCase__ )
try:
shutil.copyfile(lowerCamelCase__ , lowerCamelCase__ )
except shutil.SameFileError:
pass
def UpperCamelCase ( self : Union[str, Any] , lowerCamelCase__ : Union[str, os.PathLike] , **lowerCamelCase__ : Optional[Any] , ) -> Any:
"""simple docstring"""
if os.path.isfile(lowerCamelCase__ ):
logger.error(F"Provided path ({save_directory}) should be a directory, not a file" )
return
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
# saving model weights/files
self._save_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
@classmethod
def UpperCamelCase ( cls : Optional[Any] , lowerCamelCase__ : Union[str, Path] , lowerCamelCase__ : Optional[Union[bool, str, None]] = None , lowerCamelCase__ : Optional[Union[str, None]] = None , lowerCamelCase__ : bool = False , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional["ort.SessionOptions"] = None , **lowerCamelCase__ : int , ) -> Optional[Any]:
"""simple docstring"""
A_ = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(lowerCamelCase__ ):
A_ = OnnxRuntimeModel.load_model(
os.path.join(lowerCamelCase__ , lowerCamelCase__ ) , provider=lowerCamelCase__ , sess_options=lowerCamelCase__ )
A_ = Path(lowerCamelCase__ )
# load model from hub
else:
# download model
A_ = hf_hub_download(
repo_id=lowerCamelCase__ , filename=lowerCamelCase__ , use_auth_token=lowerCamelCase__ , revision=lowerCamelCase__ , cache_dir=lowerCamelCase__ , force_download=lowerCamelCase__ , )
A_ = Path(lowerCamelCase__ ).parent
A_ = Path(lowerCamelCase__ ).name
A_ = OnnxRuntimeModel.load_model(lowerCamelCase__ , provider=lowerCamelCase__ , sess_options=lowerCamelCase__ )
return cls(model=lowerCamelCase__ , **lowerCamelCase__ )
@classmethod
def UpperCamelCase ( cls : List[str] , lowerCamelCase__ : Union[str, Path] , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[str] = None , **lowerCamelCase__ : Union[str, Any] , ) -> Any:
"""simple docstring"""
A_ = None
if len(str(lowerCamelCase__ ).split('''@''' ) ) == 2:
A_ ,A_ = model_id.split('''@''' )
return cls._from_pretrained(
model_id=lowerCamelCase__ , revision=lowerCamelCase__ , cache_dir=lowerCamelCase__ , force_download=lowerCamelCase__ , use_auth_token=lowerCamelCase__ , **lowerCamelCase__ , )
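# Usage sketch (the repo id and file name are placeholders; mirrors the classmethods above):
#     model = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-model", file_name="model.onnx")
#     outputs = model(input_ids=np.zeros((1, 8), dtype=np.int64))
# __call__ casts every keyword argument to a NumPy array and feeds the whole dict to the
# underlying onnxruntime InferenceSession, returning the session's raw output list.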
| 203 |
import random
class Onepad:
    @staticmethod
    def encrypt(text: str ) -> tuple[list[int], list[int]]:
        """simple docstring"""
        plain = [ord(i ) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1 , 3_0_0 )
            c = (i + k) * k
            cipher.append(c )
            key.append(k )
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int] , key: list[int] ) -> str:
        """simple docstring"""
        plain = []
        for i in range(len(key ) ):
            p = int((cipher[i] - (key[i]) ** 2) / key[i] )
            plain.append(chr(p ) )
        return "".join(plain )


c , k = Onepad().encrypt("""Hello""")
print(c, k)
print(Onepad().decrypt(c, k))
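# Round-trip check (follows from c = (p + k) * k  =>  p = (c - k**2) / k; exact for ASCII-sized
# values, since the products stay well inside float precision):
_msg = """attack at dawn"""
_c , _k = Onepad().encrypt(_msg )
assert Onepad().decrypt(_c , _k ) == _msg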
| 203 | 1 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class SCREAMING_SNAKE_CASE__ ( pl.LightningModule ):
def __init__(self : List[str] , a__ : Union[str, Any] ):
"""simple docstring"""
super().__init__()
__snake_case = model
__snake_case = 2
__snake_case = nn.Linear(self.model.config.hidden_size , self.num_labels )
def a (self : List[Any] ):
"""simple docstring"""
pass
def lowerCamelCase__ ( snake_case_ : int , snake_case_ : Optional[int] , snake_case_ : Dict ) -> str:
__snake_case = LongformerModel.from_pretrained(_A )
__snake_case = LightningModel(_A )
__snake_case = torch.load(_A , map_location=torch.device('''cpu''' ) )
lightning_model.load_state_dict(ckpt['''state_dict'''] )
# init longformer question answering model
__snake_case = LongformerForQuestionAnswering.from_pretrained(_A )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(_A )
print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" )
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--longformer_model',
default=None,
type=str,
required=True,
help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.',
)
parser.add_argument(
'--longformer_question_answering_ckpt_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch Lightning Checkpoint.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
snake_case_ = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
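# Invocation sketch (the script name and checkpoint path are placeholders):
#     python convert_longformer_qa_checkpoint.py \
#         --longformer_model longformer-base-4096 \
#         --longformer_question_answering_ckpt_path ./qa_checkpoint.ckpt \
#         --pytorch_dump_folder_path ./longformer-qa
# The dump folder then loads with LongformerForQuestionAnswering.from_pretrained("./longformer-qa").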
| 719 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
A_ : List[str] = inspect.getfile(accelerate.test_utils )
A_ : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_cli.py'] )
A_ : int = ['accelerate', 'launch']
A_ : Tuple = Path.home() / '.cache/huggingface/accelerate'
A_ : List[Any] = 'default_config.yaml'
A_ : Optional[Any] = config_folder / config_file
A_ : Union[str, Any] = config_folder / '_default_config.yaml'
A_ : int = Path('tests/test_configs' )
@classmethod
def a (cls : Any ):
"""simple docstring"""
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def a (cls : List[str] ):
"""simple docstring"""
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def a (self : List[str] ):
"""simple docstring"""
__snake_case = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def a (self : str ):
"""simple docstring"""
for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
with self.subTest(config_file=a__ ):
execute_subprocess_async(
self.base_cmd + ['''--config_file''', str(a__ ), self.test_file_path] , env=os.environ.copy() )
def a (self : int ):
"""simple docstring"""
execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() )
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
A_ : List[Any] = 'test-tpu'
A_ : List[str] = 'us-central1-a'
A_ : int = 'ls'
A_ : Tuple = ['accelerate', 'tpu-config']
A_ : Union[str, Any] = 'cd /usr/share'
A_ : int = 'tests/test_samples/test_command_file.sh'
A_ : int = 'Running gcloud compute tpus tpu-vm ssh'
def a (self : Any ):
"""simple docstring"""
__snake_case = run_command(
self.cmd
+ ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=a__ , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a__ , )
def a (self : str ):
"""simple docstring"""
__snake_case = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command''',
self.command,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=a__ , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a__ , )
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=a__ )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a__ , )
def a (self : str ):
"""simple docstring"""
__snake_case = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=a__ , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a__ , )
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--command''',
self.command,
'''--command''',
'''echo "Hello World"''',
'''--debug''',
] , return_stdout=a__ , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , a__ , )
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = run_command(
self.cmd
+ ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=a__ , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a__ , )
def a (self : Any ):
"""simple docstring"""
__snake_case = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command_file''',
self.command_file,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=a__ , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a__ , )
def a (self : Any ):
"""simple docstring"""
__snake_case = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=a__ , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a__ , )
def a (self : Tuple ):
"""simple docstring"""
__snake_case = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--install_accelerate''',
'''--accelerate_version''',
'''12.0.0''',
'''--debug''',
] , return_stdout=a__ , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a__ , )
| 388 | 0 |
'''simple docstring'''
def least_divisible_repunit(divisor: int ) -> int:
    """simple docstring"""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_00_00_00 ) -> int:
    """simple docstring"""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor ) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(F"""{solution() = }""")
| 207 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ : int ={
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[str] =[
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
A__ : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
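# Aside on the pattern above (shared by every such __init__): the module swaps itself in
# sys.modules for a _LazyModule that resolves names from _import_structure on first attribute
# access, so heavy backends like torch are only imported when a class is actually touched.
# A rough stand-in for illustration only; the real implementation lives in transformers.utils:
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._map = {v: k for k, vals in import_structure.items() for v in vals}
#         def __getattr__(self, item):
#             submodule = importlib.import_module("." + self._map[item], self.__name__)
#             return getattr(submodule, item)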
| 207 | 1 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class a_ :
"""simple docstring"""
def __init__( self : Tuple ,snake_case : List[str] ,snake_case : List[str]=13 ,snake_case : Optional[Any]=7 ,snake_case : Union[str, Any]=False ,snake_case : str=True ,snake_case : Tuple=False ,snake_case : List[Any]=True ,snake_case : Tuple=33 ,snake_case : Dict=32 ,snake_case : str=5 ,snake_case : str=4 ,snake_case : int=37 ,snake_case : int="gelu" ,snake_case : int=0.1 ,snake_case : Dict=0.1 ,snake_case : int=512 ,snake_case : Optional[Any]=16 ,snake_case : List[Any]=2 ,snake_case : Tuple=0.02 ,snake_case : int=3 ,snake_case : Tuple=4 ,snake_case : List[str]=None ,):
SCREAMING_SNAKE_CASE =parent
SCREAMING_SNAKE_CASE =batch_size
SCREAMING_SNAKE_CASE =seq_length
SCREAMING_SNAKE_CASE =is_training
SCREAMING_SNAKE_CASE =use_input_mask
SCREAMING_SNAKE_CASE =use_token_type_ids
SCREAMING_SNAKE_CASE =use_labels
SCREAMING_SNAKE_CASE =vocab_size
SCREAMING_SNAKE_CASE =hidden_size
SCREAMING_SNAKE_CASE =num_hidden_layers
SCREAMING_SNAKE_CASE =num_attention_heads
SCREAMING_SNAKE_CASE =intermediate_size
SCREAMING_SNAKE_CASE =hidden_act
SCREAMING_SNAKE_CASE =hidden_dropout_prob
SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE =max_position_embeddings
SCREAMING_SNAKE_CASE =type_vocab_size
SCREAMING_SNAKE_CASE =type_sequence_label_size
SCREAMING_SNAKE_CASE =initializer_range
SCREAMING_SNAKE_CASE =num_labels
SCREAMING_SNAKE_CASE =num_choices
SCREAMING_SNAKE_CASE =scope
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE =None
if self.use_input_mask:
SCREAMING_SNAKE_CASE =random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE =None
SCREAMING_SNAKE_CASE =None
SCREAMING_SNAKE_CASE =None
if self.use_labels:
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] ,self.num_choices )
SCREAMING_SNAKE_CASE =self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase ( self : str ):
return EsmConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
def _lowerCAmelCase ( self : Dict ,snake_case : List[str] ,snake_case : Union[str, Any] ,snake_case : Tuple ,snake_case : List[Any] ,snake_case : List[str] ,snake_case : str ):
SCREAMING_SNAKE_CASE =EsmModel(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,attention_mask=snake_case )
SCREAMING_SNAKE_CASE =model(snake_case )
SCREAMING_SNAKE_CASE =model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def _lowerCAmelCase ( self : List[str] ,snake_case : int ,snake_case : str ,snake_case : Tuple ,snake_case : List[str] ,snake_case : Any ,snake_case : Any ):
SCREAMING_SNAKE_CASE =EsmForMaskedLM(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,attention_mask=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self : Tuple ,snake_case : str ,snake_case : str ,snake_case : Optional[Any] ,snake_case : Any ,snake_case : List[Any] ,snake_case : Dict ):
SCREAMING_SNAKE_CASE =self.num_labels
SCREAMING_SNAKE_CASE =EsmForTokenClassification(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,attention_mask=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =config_and_inputs
SCREAMING_SNAKE_CASE ={'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = False
__UpperCAmelCase = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
__UpperCAmelCase = ()
__UpperCAmelCase = (
{
'feature-extraction': EsmModel,
'fill-mask': EsmForMaskedLM,
'text-classification': EsmForSequenceClassification,
'token-classification': EsmForTokenClassification,
'zero-shot': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = True
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =EsmModelTester(self )
SCREAMING_SNAKE_CASE =ConfigTester(self ,config_class=snake_case ,hidden_size=37 )
def _lowerCAmelCase ( self : str ):
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self : str ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE =type
self.model_tester.create_and_check_model(*snake_case )
def _lowerCAmelCase ( self : str ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case )
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case )
@slow
def _lowerCAmelCase ( self : Any ):
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE =EsmModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()[0]
SCREAMING_SNAKE_CASE =EsmEmbeddings(config=snake_case )
SCREAMING_SNAKE_CASE =torch.as_tensor([[12, 31, 13, model.padding_idx]] )
SCREAMING_SNAKE_CASE =torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
SCREAMING_SNAKE_CASE =create_position_ids_from_input_ids(snake_case ,model.padding_idx )
self.assertEqual(position_ids.shape ,expected_positions.shape )
self.assertTrue(torch.all(torch.eq(snake_case ,snake_case ) ) )
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()[0]
SCREAMING_SNAKE_CASE =EsmEmbeddings(config=snake_case )
SCREAMING_SNAKE_CASE =torch.empty(2 ,4 ,30 )
SCREAMING_SNAKE_CASE =[
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
SCREAMING_SNAKE_CASE =torch.as_tensor([expected_single_positions, expected_single_positions] )
SCREAMING_SNAKE_CASE =embeddings.create_position_ids_from_inputs_embeds(snake_case )
self.assertEqual(position_ids.shape ,expected_positions.shape )
self.assertTrue(torch.all(torch.eq(snake_case ,snake_case ) ) )
@unittest.skip('Esm does not support embedding resizing' )
def _lowerCAmelCase ( self : List[str] ):
pass
@unittest.skip('Esm does not support embedding resizing' )
def _lowerCAmelCase ( self : Dict ):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _lowerCAmelCase ( self : Optional[int] ):
pass
@require_torch
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]
            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)
            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 252 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
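# Import structure consumed by _LazyModule below; each optional backend (vision,
# torch, TF) only registers its symbols when the dependency is importable.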
_import_structure = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase =["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 252 | 1 |
def merge_sort(collection: list) -> list:
    """Pure-Python merge sort; returns a new sorted list."""

    def merge(left: list, right: list) -> list:
        def _merge():
            while left and right:
                # Pop the smaller head element first so the merge stays stable.
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
| 136 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
a : Optional[Any] = logging.getLogger(__name__)
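# InputExample carries a raw HANS premise/hypothesis pair; InputFeatures carries the
# tokenized tensors plus the pairID used for per-heuristic evaluation.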
@dataclass(frozen=True)
class InputExample:
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True)
class InputFeatures:
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train", tokenizer.__class__.__name__, str(max_seq_length), task
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
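# TensorFlow variant: features are built eagerly and wrapped in a generator-backed tf.data.Dataset.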
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))
                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    """Converts raw HANS examples to padded, truncated InputFeatures."""
    label_map = {label: i for i, label in enumerate(label_list)}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))
        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))
    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")
    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
| 556 | 0 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
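# Thin launcher around torch_xla's xmp.spawn, mirroring torch.distributed.launch for TPUs.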
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module so xmp.spawn can reach its _mp_fn entry point.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv so the training script sees its own arguments plus the TPU core count.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
| 65 |
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
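# Common-API test suite for every MRA head; attention-output checks are skipped
# below because MRA's kernel does not materialize attention probabilities.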
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
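# Integration tests pinned to the public uw-madison/mra-base checkpoints.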
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 65 | 1 |
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
__A = """scheduler_config.json"""
class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5
@dataclass
class FlaxSchedulerOutput(BaseOutput):
    """Base output class for a scheduler's step function output."""

    prev_sample: jnp.ndarray
class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)
        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()
        if return_unused_kwargs:
            return scheduler, state, unused_kwargs
        return scheduler, state

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
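# Module-level helpers shared by the concrete Flax schedulers.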
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray:
    """Discretize the cosine alpha-bar schedule into per-step betas, clipped at max_beta."""

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config
        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )
        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)
        return cls(alphas=alphas, betas=betas, alphas_cumprod=alphas_cumprod)
def get_sqrt_alpha_prod(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    alphas_cumprod = state.alphas_cumprod
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)
    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
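# Forward-noising identity implemented above: x_t = sqrt(alpha_bar_t) * x_0
# + sqrt(1 - alpha_bar_t) * eps, with the v-prediction target
# v_t = sqrt(alpha_bar_t) * eps - sqrt(1 - alpha_bar_t) * x_0.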
| 93 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
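# Pipeline for class-conditional image generation with DiT: sample latents, denoise
# with the transformer (optionally under classifier-free guidance), then decode with the VAE.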
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)
        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))
    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]
    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels
        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents
        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample
            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)
            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred
            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample
        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input
        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample
        samples = (samples / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            samples = self.numpy_to_pil(samples)
        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=samples)
| 655 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
| 612 |
"""simple docstring"""
def lowerCAmelCase ( UpperCamelCase_: Optional[int] , UpperCamelCase_: str ) -> List[Any]:
'''simple docstring'''
_a = (boundary[1] - boundary[0]) / steps
_a = boundary[0]
_a = boundary[1]
_a = make_points(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
_a = 0.0
y += (h / 2.0) * f(UpperCamelCase_ )
for i in x_i:
# print(i)
y += h * f(UpperCamelCase_ )
y += (h / 2.0) * f(UpperCamelCase_ )
return y
def lowerCAmelCase ( UpperCamelCase_: str , UpperCamelCase_: Any , UpperCamelCase_: Any ) -> str:
'''simple docstring'''
_a = a + h
while x < (b - h):
yield x
_a = x + h
def lowerCAmelCase ( UpperCamelCase_: List[str] ) -> int: # enter your function here
'''simple docstring'''
_a = (x - 0) * (x - 0)
return y
def lowerCAmelCase ( ) -> Tuple:
'''simple docstring'''
_a = 0.0 # Lower bound of integration
_a = 1.0 # Upper bound of integration
_a = 10.0 # define number of steps or resolution
_a = [a, b] # define boundary of integration
_a = method_a(UpperCamelCase_ , UpperCamelCase_ )
print(f'''y = {y}''' )
if __name__ == "__main__":
main()
| 612 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
lowerCAmelCase : Dict = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
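# Interactive prompt helpers shared by `accelerate config`; each _convert_* maps a
# numeric menu choice onto the corresponding enum value.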
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """A custom formatter that removes the usage line from subcommand help messages."""

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
| 3 |
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
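# Entry point for `datasets-cli`: register the subcommands, then dispatch to the one
# selected on the command line, forwarding any leftover --key value pairs as kwargs.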
def parse_unknown_args(unknown_args):
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)
    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)
    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
    main()
| 287 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
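# Lazy import structure for Transformer-XL: torch and TF model symbols are only
# registered when the corresponding backend is available.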
_import_structure = {
"""configuration_transfo_xl""": ["""TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TransfoXLConfig"""],
"""tokenization_transfo_xl""": ["""TransfoXLCorpus""", """TransfoXLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
"""TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AdaptiveEmbedding""",
"""TransfoXLForSequenceClassification""",
"""TransfoXLLMHeadModel""",
"""TransfoXLModel""",
"""TransfoXLPreTrainedModel""",
"""load_tf_weights_in_transfo_xl""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
"""TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAdaptiveEmbedding""",
"""TFTransfoXLForSequenceClassification""",
"""TFTransfoXLLMHeadModel""",
"""TFTransfoXLMainLayer""",
"""TFTransfoXLModel""",
"""TFTransfoXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 131 |
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Render the multiplication table of `number` up to `number_of_terms` lines."""
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 131 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = '▁'
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
'tokenizer_file': {
'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
},
}
SCREAMING_SNAKE_CASE_ = {
'google/pegasus-xsum': 512,
}
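# Fast (tokenizers-backed) Pegasus tokenizer. Pegasus reserves <mask_1>/<mask_2> for
# its pretraining objectives plus <unk_2>..<unk_{offset-1}> filler special tokens.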
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 34 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
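# Config holder that fabricates image-processor kwargs and inputs for the tests below.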
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
"""simple docstring"""
import random
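# Miller-Rabin: write num - 1 = 2**t * s with s odd, then test 5 random witnesses;
# a composite slips through with probability at most (1/4)**5.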
def rabin_miller(num: int) -> bool:
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num(num: int) -> bool:
    """Cheap trial division against small primes before falling back to Rabin-Miller."""
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
1_01,
1_03,
1_07,
1_09,
1_13,
1_27,
1_31,
1_37,
1_39,
1_49,
1_51,
1_57,
1_63,
1_67,
1_73,
1_79,
1_81,
1_91,
1_93,
1_97,
1_99,
2_11,
2_23,
2_27,
2_29,
2_33,
2_39,
2_41,
2_51,
2_57,
2_63,
2_69,
2_71,
2_77,
2_81,
2_83,
2_93,
3_07,
3_11,
3_13,
3_17,
3_31,
3_37,
3_47,
3_49,
3_53,
3_59,
3_67,
3_73,
3_79,
3_83,
3_89,
3_97,
4_01,
4_09,
4_19,
4_21,
4_31,
4_33,
4_39,
4_43,
4_49,
4_57,
4_61,
4_63,
4_67,
4_79,
4_87,
4_91,
4_99,
5_03,
5_09,
5_21,
5_23,
5_41,
5_47,
5_57,
5_63,
5_69,
5_71,
5_77,
5_87,
5_93,
5_99,
6_01,
6_07,
6_13,
6_17,
6_19,
6_31,
6_41,
6_43,
6_47,
6_53,
6_59,
6_61,
6_73,
6_77,
6_83,
6_91,
7_01,
7_09,
7_19,
7_27,
7_33,
7_39,
7_43,
7_51,
7_57,
7_61,
7_69,
7_73,
7_87,
7_97,
8_09,
8_11,
8_21,
8_23,
8_27,
8_29,
8_39,
8_53,
8_57,
8_59,
8_63,
8_77,
8_81,
8_83,
8_87,
9_07,
9_11,
9_19,
9_29,
9_37,
9_41,
9_47,
9_53,
9_67,
9_71,
9_77,
9_83,
9_91,
9_97,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    """Sample keysize-bit integers until one passes the primality checks."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
if __name__ == "__main__":
lowerCamelCase__ : Union[str, Any] = generate_large_prime()
print(("Prime number:", num))
print(("is_prime_low_num:", is_prime_low_num(num)))
| 721 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
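# 2D statics helpers: each force is an (Fx, Fy) row and `location` holds its point
# of application; equilibrium is judged by the net moment about the origin.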
def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force given in polar form into its (x, y) components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1) -> bool:
    """Check rotational equilibrium: the net moment (r x F summed over forces) must vanish."""
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
# Test to check if it works
lowerCamelCase__ : Optional[Any] = array(
[
polar_force(7_1_8.4, 180 - 30),
polar_force(8_7_9.5_4, 45),
polar_force(100, -90),
]
)
lowerCamelCase__ : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
lowerCamelCase__ : Union[str, Any] = array(
[
polar_force(30 * 9.8_1, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
lowerCamelCase__ : Any = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
lowerCamelCase__ : Union[str, Any] = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
lowerCamelCase__ : Optional[int] = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 18 | 0 |
"""simple docstring"""
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_configure(config):
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    # pytest exits with code 5 when no tests are collected; treat that as success.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
__UpperCamelCase : List[str] = doctest.register_optionflag('''IGNORE_RESULT''')
__UpperCamelCase : Optional[Any] = doctest.OutputChecker
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __lowerCAmelCase ( self : List[str] ,lowercase_ : Dict ,lowercase_ : Optional[int] ,lowercase_ : Tuple ):
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self ,lowercase_ ,lowercase_ ,lowercase_ )
__UpperCamelCase : Any = CustomOutputChecker
__UpperCamelCase : str = HfDoctestModule
__UpperCamelCase : Optional[int] = HfDocTestParser
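# Illustrative sketch of how the custom flag is meant to be used (not part of
# the original conftest): once registered, a doctest can opt out of output
# comparison with the matching directive, e.g.
#
#     >>> import random
#     >>> random.random()  # doctest: +IGNORE_RESULT
#
# With the checker above, any output mismatch is accepted while IGNORE_RESULT
# is set in the option flags.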
| 450 |
'''simple docstring'''
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase__ : Tuple = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
UpperCAmelCase__ : Tuple = 5
UpperCAmelCase__ : List[Any] = 10
@require_sentencepiece
@require_tokenizers
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :Tuple = SpeechaTextTokenizer
snake_case__ :Dict = False
snake_case__ :Optional[int] = True
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
super().setUp()
lowerCAmelCase__ = sp.SentencePieceProcessor()
spm_model.Load(__magic_name__ )
lowerCAmelCase__ = ["<s>", "<pad>", "</s>", "<unk>"]
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(__magic_name__ ) )]
lowerCAmelCase__ = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
lowerCAmelCase__ = Path(self.tmpdirname )
save_json(__magic_name__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__magic_name__ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
lowerCAmelCase__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = "<pad>"
lowerCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(__magic_name__ ) , 1001 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1001 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(__magic_name__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__magic_name__ ) , [289, 50, 14, 174, 386] , )
lowerCAmelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__magic_name__ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , )
lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(__magic_name__ )
self.assertListEqual(__magic_name__ , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(__magic_name__ )
self.assertListEqual(
__magic_name__ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , )
@slow
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = {"input_ids": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name="facebook/s2t-small-mustc-en-de-st" , revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad" , )
@require_sentencepiece
class A ( unittest.TestCase ):
snake_case__ :Union[str, Any] = 'valhalla/s2t_mustc_multilinguial_medium'
snake_case__ :Tuple = 'C\'est trop cool'
snake_case__ :List[str] = 'Esto es genial'
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
self.assertEqual(self.tokenizer.lang_code_to_id["pt"] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id["ru"] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id["it"] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id["de"] , 11 )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
self.assertEqual(self.tokenizer.vocab_size , 10000 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
self.assertIn(__magic_name__ , self.tokenizer.all_special_ids )
lowerCAmelCase__ = [ES_CODE, 4, 1601, 47, 7647, 2]
lowerCAmelCase__ = self.tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ )
lowerCAmelCase__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
self.assertNotIn(self.tokenizer.eos_token , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = "fr"
lowerCAmelCase__ = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , __magic_name__ )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = "fr"
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
lowerCAmelCase__ = "es"
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
| 48 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class A :
def __init__( self : List[Any] , __a : Any , ) -> Dict:
__UpperCAmelCase = parent
__UpperCAmelCase = 1_3
__UpperCAmelCase = 7
__UpperCAmelCase = True
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = True
__UpperCAmelCase = 9_9
__UpperCAmelCase = 3_2
__UpperCAmelCase = 2
__UpperCAmelCase = 4
__UpperCAmelCase = 3_7
__UpperCAmelCase = '''gelu'''
__UpperCAmelCase = 0.1
__UpperCAmelCase = 0.1
__UpperCAmelCase = 5_1_2
__UpperCAmelCase = 1_6
__UpperCAmelCase = 2
__UpperCAmelCase = 0.0_2
__UpperCAmelCase = 3
__UpperCAmelCase = 4
__UpperCAmelCase = None
def snake_case__ ( self : Optional[int] ) -> Dict:
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase = None
if self.use_input_mask:
__UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self : Union[str, Any] , __a : List[str] , __a : int , __a : Union[str, Any] , __a : Union[str, Any] , __a : List[Any] , __a : int ) -> Any:
__UpperCAmelCase = TFDistilBertModel(config=__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
__UpperCAmelCase = [input_ids, input_mask]
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self : Tuple , __a : List[Any] , __a : int , __a : Tuple , __a : List[Any] , __a : Union[str, Any] , __a : List[Any] ) -> int:
__UpperCAmelCase = TFDistilBertForMaskedLM(config=__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self : Optional[int] , __a : Any , __a : Union[str, Any] , __a : Optional[int] , __a : int , __a : Optional[Any] , __a : Optional[int] ) -> Dict:
__UpperCAmelCase = TFDistilBertForQuestionAnswering(config=__a )
__UpperCAmelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case__ ( self : Any , __a : Optional[Any] , __a : List[str] , __a : Dict , __a : Dict , __a : int , __a : List[Any] ) -> Dict:
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = TFDistilBertForSequenceClassification(__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self : Union[str, Any] , __a : int , __a : str , __a : Union[str, Any] , __a : Optional[int] , __a : List[str] , __a : Dict ) -> str:
__UpperCAmelCase = self.num_choices
__UpperCAmelCase = TFDistilBertForMultipleChoice(__a )
__UpperCAmelCase = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case__ ( self : int , __a : Optional[Any] , __a : int , __a : Tuple , __a : int , __a : Optional[int] , __a : Optional[int] ) -> int:
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = TFDistilBertForTokenClassification(__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self : str ) -> Any:
__UpperCAmelCase = self.prepare_config_and_inputs()
((__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase)) = config_and_inputs
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class A ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
a_ = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
a_ = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a_ = False
a_ = False
def snake_case__ ( self : Any ) -> Any:
__UpperCAmelCase = TFDistilBertModelTester(self )
__UpperCAmelCase = ConfigTester(self , config_class=__a , dim=3_7 )
def snake_case__ ( self : List[Any] ) -> Optional[int]:
self.config_tester.run_common_tests()
def snake_case__ ( self : Any ) -> str:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__a )
def snake_case__ ( self : Tuple ) -> Dict:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__a )
def snake_case__ ( self : Union[str, Any] ) -> Any:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__a )
def snake_case__ ( self : Optional[Any] ) -> Dict:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__a )
def snake_case__ ( self : Any ) -> int:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__a )
def snake_case__ ( self : List[str] ) -> List[Any]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__a )
@slow
def snake_case__ ( self : Dict ) -> Tuple:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
__UpperCAmelCase = TFDistilBertModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@require_tf
class A ( unittest.TestCase ):
@slow
def snake_case__ ( self : int ) -> Dict:
__UpperCAmelCase = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
__UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCAmelCase = model(__a )[0]
__UpperCAmelCase = [1, 6, 7_6_8]
self.assertEqual(output.shape , __a )
__UpperCAmelCase = tf.constant(
[
[
[0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
[0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
[0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-4 )
| 721 | '''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
__lowerCAmelCase : List[Any] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__lowerCAmelCase : str = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
__lowerCAmelCase : int = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
__lowerCAmelCase : List[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
__lowerCAmelCase : List[str] = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
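# Illustrative matches for the regexes above (a sketch, not in the original script):
#   _re_tf_models.match("TFBertModel").groups()[0]      -> "Bert"
#   _re_flax_models.match("FlaxBertModel").groups()[0]  -> "Bert"
#   _re_pt_models.match("BertModel").groups()[0]        -> "Bert"
# _re_pt_models would also match "TFBertModel" (as "TFBert"), which is why it may
# only be tried once the TF and Flax patterns have failed.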
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
__lowerCAmelCase : Optional[int] = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def lowerCAmelCase ( UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , UpperCamelCase__ )
return [m.group(0 ) for m in matches]
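# Illustrative behaviour of camel_case_split above (a sketch, not in the original):
#   camel_case_split("TFDistilBertModel")    -> ["TF", "Distil", "Bert", "Model"]
#   camel_case_split("AutoModelForCausalLM") -> ["Auto", "Model", "For", "Causal", "LM"]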
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
__UpperCAmelCase = {
config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
__UpperCAmelCase = collections.defaultdict(UpperCamelCase__ )
__UpperCAmelCase = collections.defaultdict(UpperCamelCase__ )
__UpperCAmelCase = collections.defaultdict(UpperCamelCase__ )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(UpperCamelCase__ ):
__UpperCAmelCase = None
if _re_tf_models.match(UpperCamelCase__ ) is not None:
__UpperCAmelCase = tf_models
__UpperCAmelCase = _re_tf_models.match(UpperCamelCase__ ).groups()[0]
elif _re_flax_models.match(UpperCamelCase__ ) is not None:
__UpperCAmelCase = flax_models
__UpperCAmelCase = _re_flax_models.match(UpperCamelCase__ ).groups()[0]
elif _re_pt_models.match(UpperCamelCase__ ) is not None:
__UpperCAmelCase = pt_models
__UpperCAmelCase = _re_pt_models.match(UpperCamelCase__ ).groups()[0]
if lookup_dict is not None:
while len(UpperCamelCase__ ) > 0:
if attr_name in model_prefix_to_model_type:
__UpperCAmelCase = True
break
# Try again after removing the last word in the name
__UpperCAmelCase = ''''''.join(camel_case_split(UpperCamelCase__ )[:-1] )
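                # Illustrative walk (a sketch): for the class name "BertLMHeadModel"
                # the regex yields "BertLMHead", which is not a config prefix; the
                # loop then tries "BertLM" and finally "Bert", which is, so the
                # "bert" model type gets flagged for this framework.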
__UpperCAmelCase = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
__UpperCAmelCase = list(UpperCamelCase__ )
all_models.sort()
__UpperCAmelCase = {'''model_type''': all_models}
__UpperCAmelCase = [pt_models[t] for t in all_models]
__UpperCAmelCase = [tf_models[t] for t in all_models]
__UpperCAmelCase = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to make sure each model type has an associated processing class.
__UpperCAmelCase = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
__UpperCAmelCase = '''AutoProcessor'''
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
__UpperCAmelCase = '''AutoTokenizer'''
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
__UpperCAmelCase = '''AutoFeatureExtractor'''
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
__UpperCAmelCase = '''AutoTokenizer'''
__UpperCAmelCase = [processors[t] for t in all_models]
return pd.DataFrame(UpperCamelCase__ )
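# Illustrative shape of the frame returned above (a sketch; column names other
# than "model_type" are assumptions, since the obfuscated assignments hide them):
#   model_type  pt_model  tf_model  flax_model  processor
#   bert        True      True      True        AutoTokenizer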
def lowerCAmelCase ( UpperCamelCase__ : List[str] ):
"""simple docstring"""
__UpperCAmelCase = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
__UpperCAmelCase = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
__UpperCAmelCase = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""]
# Loop through all three frameworks
for module, cls, mapping in zip(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
# The type of pipeline may not exist in this framework
if not hasattr(UpperCamelCase__ , UpperCamelCase__ ):
continue
# First extract all model_names
__UpperCAmelCase = []
for name in getattr(UpperCamelCase__ , UpperCamelCase__ ).values():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
model_names.append(UpperCamelCase__ )
else:
model_names.extend(list(UpperCamelCase__ ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def lowerCAmelCase ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ):
"""simple docstring"""
__UpperCAmelCase = get_frameworks_table()
__UpperCAmelCase = Dataset.from_pandas(UpperCamelCase__ )
__UpperCAmelCase = hf_hub_download(
'''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=UpperCamelCase__ )
__UpperCAmelCase = Dataset.from_json(UpperCamelCase__ )
__UpperCAmelCase = {
tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class'''])
for i in range(len(UpperCamelCase__ ) )
}
__UpperCAmelCase = update_pipeline_and_auto_class_table(UpperCamelCase__ )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
__UpperCAmelCase = sorted(table.keys() )
__UpperCAmelCase = pd.DataFrame(
{
'''model_class''': model_classes,
'''pipeline_tag''': [table[m][0] for m in model_classes],
'''auto_class''': [table[m][1] for m in model_classes],
} )
__UpperCAmelCase = Dataset.from_pandas(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(UpperCamelCase__ , '''frameworks.json''' ) )
tags_dataset.to_json(os.path.join(UpperCamelCase__ , '''pipeline_tags.json''' ) )
if commit_sha is not None:
__UpperCAmelCase = (
f"""Update with commit {commit_sha}\n\nSee: """
f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
__UpperCAmelCase = '''Update'''
upload_folder(
repo_id='''huggingface/transformers-metadata''' , folder_path=UpperCamelCase__ , repo_type='''dataset''' , token=UpperCamelCase__ , commit_message=UpperCamelCase__ , )
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
__UpperCAmelCase = transformers_module.pipelines.SUPPORTED_TASKS
__UpperCAmelCase = []
for key in pipeline_tasks:
if key not in in_table:
__UpperCAmelCase = pipeline_tasks[key]['''pt''']
if isinstance(UpperCamelCase__ , (list, tuple) ):
__UpperCAmelCase = model[0]
__UpperCAmelCase = model.__name__
if model not in in_table.values():
missing.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
__UpperCAmelCase = ''', '''.join(UpperCamelCase__ )
raise ValueError(
'''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
f"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
__lowerCAmelCase : Tuple = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 654 | 0 |